]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/bnx2x/bnx2x_main.c
bnx2x: Create bnx2x_cmn.* files
[net-next-2.6.git] / drivers / net / bnx2x / bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54 #define BNX2X_MAIN
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
59 #include "bnx2x_cmn.h"
60
#define DRV_MODULE_VERSION      "1.52.53-1"
/* NOTE(review): date appears to be YYYY/DD/MM rather than YYYY/MM/DD —
 * confirm intended format before "fixing" */
#define DRV_MODULE_RELDATE      "2010/18/04"
/* presumably the minimum required bootcode version — verify against usage */
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
/* firmware version string assembled from the bnx2x_fw_file_hdr macros */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
/* per-chip-family firmware blob names requested at load time */
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)
78
/* module banner string */
static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* advertise the firmware blobs this module needs */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89
90 static int multi_mode = 1;
91 module_param(multi_mode, int, 0);
92 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
93                              "(0 Disable; 1 Enable (default))");
94
95 static int num_queues;
96 module_param(num_queues, int, 0);
97 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
98                                 " (default is as a number of CPUs)");
99
100 static int disable_tpa;
101 module_param(disable_tpa, int, 0);
102 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
103
104 static int int_mode;
105 module_param(int_mode, int, 0);
106 MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
107                                 "(1 INT#x; 2 MSI)");
108
109 static int dropless_fc;
110 module_param(dropless_fc, int, 0);
111 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
112
113 static int poll;
114 module_param(poll, int, 0);
115 MODULE_PARM_DESC(poll, " Use polling (for debug)");
116
117 static int mrrs = -1;
118 module_param(mrrs, int, 0);
119 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120
121 static int debug;
122 module_param(debug, int, 0);
123 MODULE_PARM_DESC(debug, " Default debug msglevel");
124
125 static struct workqueue_struct *bnx2x_wq;
126
/* supported board variants; values are indices into board_info[] below */
enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


/* PCI IDs handled by this driver; driver_data carries the board_type */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
151
152 /****************************************************************************
153 * General service functions
154 ****************************************************************************/
155
/* used only at init
 * locking is done by mcp
 */
/* Indirect register write through PCI config space: program the GRC
 * address window, write the data, then repoint the window at the
 * vendor-id offset (presumably to leave it in a benign state).
 * The three writes must stay in exactly this order.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}
166
/* Indirect register read through PCI config space; mirror image of
 * bnx2x_reg_wr_ind() — window is restored after the data read.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
178
/* "GO" doorbell registers of the 16 DMAE command channels, indexed by
 * channel number (see bnx2x_post_dmae()) */
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
185
186 /* copy command into DMAE command memory and set DMAE command go */
187 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
188                             int idx)
189 {
190         u32 cmd_offset;
191         int i;
192
193         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
194         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
195                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
196
197                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
198                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
199         }
200         REG_WR(bp, dmae_reg_go_c[idx], 1);
201 }
202
/* Write @len32 dwords from host memory @dma_addr to GRC address @dst_addr
 * using the DMAE engine.  Falls back to indirect register writes while the
 * DMAE is not yet ready (early init).  May sleep (mutex, msleep), so do not
 * call with a spinlock held; serialized by bp->dmae_mutex.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;  /* completion-poll budget */

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        /* PCI -> GRC transfer; endianness swap picked at compile time */
        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;       /* GRC address in dwords */
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        /* on completion the engine writes DMAE_COMP_VAL to wb_comp */
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        /* clear completion marker before posting the command */
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        /* poll until the engine writes the completion value, or give up */
        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
276
/* Read @len32 dwords from GRC address @src_addr into the slowpath wb_data
 * buffer using the DMAE engine.  Falls back to indirect register reads
 * while the DMAE is not yet ready.  May sleep; serialized by
 * bp->dmae_mutex.  Result is left in bp->slowpath->wb_data[].
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;  /* completion-poll budget */

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        /* GRC -> PCI transfer; endianness swap picked at compile time */
        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;       /* GRC address in dwords */
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        /* on completion the engine writes DMAE_COMP_VAL to wb_comp */
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        /* clear destination buffer and completion marker before posting */
        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        /* poll until the engine writes the completion value, or give up */
        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}
351
352 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353                                u32 addr, u32 len)
354 {
355         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
356         int offset = 0;
357
358         while (len > dmae_wr_max) {
359                 bnx2x_write_dmae(bp, phys_addr + offset,
360                                  addr + offset, dmae_wr_max);
361                 offset += dmae_wr_max * 4;
362                 len -= dmae_wr_max;
363         }
364
365         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
366 }
367
368 /* used only for slowpath so not inlined */
369 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
370 {
371         u32 wb_write[2];
372
373         wb_write[0] = val_hi;
374         wb_write[1] = val_lo;
375         REG_WR_DMAE(bp, reg, wb_write, 2);
376 }
377
378 #ifdef USE_WB_RD
379 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
380 {
381         u32 wb_data[2];
382
383         REG_RD_DMAE(bp, reg, wb_data, 2);
384
385         return HILO_U64(wb_data[0], wb_data[1]);
386 }
387 #endif
388
389 static int bnx2x_mc_assert(struct bnx2x *bp)
390 {
391         char last_idx;
392         int i, rc = 0;
393         u32 row0, row1, row2, row3;
394
395         /* XSTORM */
396         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
397                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
398         if (last_idx)
399                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
400
401         /* print the asserts */
402         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
403
404                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405                               XSTORM_ASSERT_LIST_OFFSET(i));
406                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
408                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
409                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
410                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
411                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
412
413                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
414                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
415                                   " 0x%08x 0x%08x 0x%08x\n",
416                                   i, row3, row2, row1, row0);
417                         rc++;
418                 } else {
419                         break;
420                 }
421         }
422
423         /* TSTORM */
424         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
425                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
426         if (last_idx)
427                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
428
429         /* print the asserts */
430         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
431
432                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433                               TSTORM_ASSERT_LIST_OFFSET(i));
434                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
436                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
437                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
438                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
439                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
440
441                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
442                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
443                                   " 0x%08x 0x%08x 0x%08x\n",
444                                   i, row3, row2, row1, row0);
445                         rc++;
446                 } else {
447                         break;
448                 }
449         }
450
451         /* CSTORM */
452         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
453                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
454         if (last_idx)
455                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
456
457         /* print the asserts */
458         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
459
460                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461                               CSTORM_ASSERT_LIST_OFFSET(i));
462                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
464                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
465                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
466                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
467                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
468
469                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
470                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
471                                   " 0x%08x 0x%08x 0x%08x\n",
472                                   i, row3, row2, row1, row0);
473                         rc++;
474                 } else {
475                         break;
476                 }
477         }
478
479         /* USTORM */
480         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
481                            USTORM_ASSERT_LIST_INDEX_OFFSET);
482         if (last_idx)
483                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
484
485         /* print the asserts */
486         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
487
488                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
489                               USTORM_ASSERT_LIST_OFFSET(i));
490                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
491                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
492                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
493                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
494                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
495                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
496
497                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
498                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
499                                   " 0x%08x 0x%08x 0x%08x\n",
500                                   i, row3, row2, row1, row0);
501                         rc++;
502                 } else {
503                         break;
504                 }
505         }
506
507         return rc;
508 }
509
/* Dump the MCP firmware trace buffer (text, located in the MCP scratchpad
 * just below shmem) to the kernel log.  The buffer appears to be circular:
 * printed from the current mark to the end, then from the start up to the
 * mark.  No-op when there is no MCP.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];         /* 8 trace dwords + room for NUL */
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        /* the dword at shmem_base - 0x800 + 4 holds the current mark */
        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        /* empty record so the pr_cont()s below start on a fresh line */
        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;  /* NUL-terminate before printing as string */
                pr_cont("%s", (char *)data);
        }
        /* wrap around: start of the buffer up to the mark */
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}
542
/* Dump driver state to the kernel log on a fatal error: default status
 * block indices, per-queue Rx/Tx indices, a window of each ring around
 * the current consumer/producer, then the MCP fw trace and any STORM
 * asserts.  Statistics collection is disabled for the duration.
 */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                /* window: 10 BDs before the consumer to 503 after */
                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}
655
/* Enable host interrupts in the HC for this port, programming it for
 * whichever of MSI-X / MSI / INTx is in use (per bp->flags), and set up
 * the attention leading/trailing edge registers on E1H.
 */
void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                /* INTx: first write enables everything including the
                 * MSI/MSI-X bit, which is then cleared for the final write
                 * below.  NOTE(review): presumably a required HW sequence —
                 * confirm before changing the double write. */
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        /* base mask plus this VN's bit (VN in bits 4..7) */
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
715
/* Disable all host interrupt sources in the HC for this port and verify
 * the write took effect by reading the register back.
 */
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
737
/* Disable interrupt handling and wait until no ISR or slowpath task is
 * still running.  @disable_hw additionally masks interrupts at the HC so
 * the hardware stops generating them.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                /* vector 0 is slowpath; fastpath vectors follow, shifted by
                 * one more when CNIC owns a vector */
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
767
768 /* fast path */
769
770 /*
771  * General service functions
772  */
773
774 /* Return true if succeeded to acquire the lock */
775 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
776 {
777         u32 lock_status;
778         u32 resource_bit = (1 << resource);
779         int func = BP_FUNC(bp);
780         u32 hw_lock_control_reg;
781
782         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
783
784         /* Validating that the resource is within range */
785         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
786                 DP(NETIF_MSG_HW,
787                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
788                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
789                 return -EINVAL;
790         }
791
792         if (func <= 5)
793                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
794         else
795                 hw_lock_control_reg =
796                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
797
798         /* Try to acquire the lock */
799         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
800         lock_status = REG_RD(bp, hw_lock_control_reg);
801         if (lock_status & resource_bit)
802                 return true;
803
804         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
805         return false;
806 }
807
808
809 #ifdef BCM_CNIC
810 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
811 #endif
812
/* Handle a slow-path (ramrod) completion arriving on a fast-path RX CQE.
 *
 * Decodes the connection id and command from the CQE, credits back the
 * slow-path queue slot, and advances either the per-queue state machine
 * (for non-leading queues) or the global bp->state machine (for the
 * leading queue, fp->index == 0).
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* the ramrod that just completed frees one slow-path queue slot */
	bp->spq_left++;

	if (fp->index) {
		/* non-leading (MULTI) queue: dispatch on command+queue state */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading queue: dispatch on command + global device state */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb(); /* make the decrement visible before waiters poll */
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
896
/* INTx/shared interrupt handler.
 *
 * Acks the interrupt, then walks the status bits: bit (0x2 << sb_id)
 * per RX/TX queue (schedules NAPI), the CNIC status block if compiled
 * in, and bit 0x1 for slow-path work (queued to the bnx2x workqueue).
 * Returns IRQ_NONE only when no status bit was ours.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each queue's status bit is keyed by its status-block id */
		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			/* warm the cache lines NAPI will touch first */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0x1 = slow-path events; defer them to process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* any bits still set were not claimed by anything above */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
968
969 /* end of fast path */
970
971
972 /* Link */
973
974 /*
975  * General service functions
976  */
977
978 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
979 {
980         u32 lock_status;
981         u32 resource_bit = (1 << resource);
982         int func = BP_FUNC(bp);
983         u32 hw_lock_control_reg;
984         int cnt;
985
986         /* Validating that the resource is within range */
987         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
988                 DP(NETIF_MSG_HW,
989                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
990                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
991                 return -EINVAL;
992         }
993
994         if (func <= 5) {
995                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
996         } else {
997                 hw_lock_control_reg =
998                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
999         }
1000
1001         /* Validating that the resource is not already taken */
1002         lock_status = REG_RD(bp, hw_lock_control_reg);
1003         if (lock_status & resource_bit) {
1004                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1005                    lock_status, resource_bit);
1006                 return -EEXIST;
1007         }
1008
1009         /* Try for 5 second every 5ms */
1010         for (cnt = 0; cnt < 1000; cnt++) {
1011                 /* Try to acquire the lock */
1012                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1013                 lock_status = REG_RD(bp, hw_lock_control_reg);
1014                 if (lock_status & resource_bit)
1015                         return 0;
1016
1017                 msleep(5);
1018         }
1019         DP(NETIF_MSG_HW, "Timeout\n");
1020         return -EAGAIN;
1021 }
1022
1023 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1024 {
1025         u32 lock_status;
1026         u32 resource_bit = (1 << resource);
1027         int func = BP_FUNC(bp);
1028         u32 hw_lock_control_reg;
1029
1030         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1031
1032         /* Validating that the resource is within range */
1033         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1034                 DP(NETIF_MSG_HW,
1035                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1036                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1037                 return -EINVAL;
1038         }
1039
1040         if (func <= 5) {
1041                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1042         } else {
1043                 hw_lock_control_reg =
1044                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1045         }
1046
1047         /* Validating that the resource is currently taken */
1048         lock_status = REG_RD(bp, hw_lock_control_reg);
1049         if (!(lock_status & resource_bit)) {
1050                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1051                    lock_status, resource_bit);
1052                 return -EFAULT;
1053         }
1054
1055         REG_WR(bp, hw_lock_control_reg, resource_bit);
1056         return 0;
1057 }
1058
1059
1060 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1061 {
1062         /* The GPIO should be swapped if swap register is set and active */
1063         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1064                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1065         int gpio_shift = gpio_num +
1066                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1067         u32 gpio_mask = (1 << gpio_shift);
1068         u32 gpio_reg;
1069         int value;
1070
1071         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1072                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1073                 return -EINVAL;
1074         }
1075
1076         /* read GPIO value */
1077         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1078
1079         /* get the requested pin value */
1080         if ((gpio_reg & gpio_mask) == gpio_mask)
1081                 value = 1;
1082         else
1083                 value = 0;
1084
1085         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1086
1087         return value;
1088 }
1089
/* Drive a GPIO pin: output low, output high, or float (input/hi-Z).
 *
 * Takes and releases the shared GPIO HW lock around the read-modify-write.
 * Returns 0 on success, -EINVAL for an invalid pin number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* the GPIO register is shared between ports/functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the register value unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1142
/* Set or clear the interrupt state of a GPIO pin via MISC_REG_GPIO_INT.
 *
 * Takes and releases the shared GPIO HW lock around the read-modify-write.
 * Returns 0 on success, -EINVAL for an invalid pin number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* the GPIO interrupt register is shared between ports/functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: write back the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1188
/* Drive an SPIO pin (SPIO_4..SPIO_7): output low, output high, or float.
 *
 * Takes and releases the shared SPIO HW lock around the read-modify-write.
 * Returns 0 on success, -EINVAL for a pin outside the SPIO_4..SPIO_7 range.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	/* only SPIO 4..7 are software-controllable */
	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	/* the SPIO register is shared between functions */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: write back the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1234
1235 void bnx2x_calc_fc_adv(struct bnx2x *bp)
1236 {
1237         switch (bp->link_vars.ieee_fc &
1238                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1239         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1240                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1241                                           ADVERTISED_Pause);
1242                 break;
1243
1244         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1245                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1246                                          ADVERTISED_Pause);
1247                 break;
1248
1249         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1250                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1251                 break;
1252
1253         default:
1254                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1255                                           ADVERTISED_Pause);
1256                 break;
1257         }
1258 }
1259
1260
/* First-time PHY/link bring-up, called on device load.
 *
 * Requires MCP bootcode; fails with -EINVAL when it is absent.
 * Returns the bnx2x_phy_init() result otherwise.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* diagnostics load uses XGXS loopback instead of a real link */
		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		/* refresh the advertised pause flags from the new link vars */
		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA the link may already be up here */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1295
1296 void bnx2x_link_set(struct bnx2x *bp)
1297 {
1298         if (!BP_NOMCP(bp)) {
1299                 bnx2x_acquire_phy_lock(bp);
1300                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1301                 bnx2x_release_phy_lock(bp);
1302
1303                 bnx2x_calc_fc_adv(bp);
1304         } else
1305                 BNX2X_ERR("Bootcode is missing - can not set link\n");
1306 }
1307
1308 static void bnx2x__link_reset(struct bnx2x *bp)
1309 {
1310         if (!BP_NOMCP(bp)) {
1311                 bnx2x_acquire_phy_lock(bp);
1312                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1313                 bnx2x_release_phy_lock(bp);
1314         } else
1315                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1316 }
1317
1318 u8 bnx2x_link_test(struct bnx2x *bp)
1319 {
1320         u8 rc = 0;
1321
1322         if (!BP_NOMCP(bp)) {
1323                 bnx2x_acquire_phy_lock(bp);
1324                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1325                 bnx2x_release_phy_lock(bp);
1326         } else
1327                 BNX2X_ERR("Bootcode is missing - can not test link\n");
1328
1329         return rc;
1330 }
1331
/* Initialize the per-port rate-shaping and fairness parameters from the
 * current line speed. All timeouts end up in SDM ticks (4 usec each).
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* line rate in bytes per usec (Mbps / 8) */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1366
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Stores the result in bp->vn_weight_sum:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* functions are interleaved across ports: func = 2*vn + port */
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		/* min BW is configured in units of 100 Mbps */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1412
/* Program the per-VN rate-shaping and fairness variables for @func into
 * XSTORM internal memory, derived from the shared-memory MF configuration.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* BW fields are configured in units of 100 Mbps */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	/* the structs are copied word by word into STORM internal RAM */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
1474
1475
/* This function is called upon link interrupt.
 *
 * Re-reads the link state, updates pause/statistics accordingly, reports
 * a change to the stack, and — in multi-function (E1H MF) mode — notifies
 * sibling functions on the same port and re-programs the rate-shaping/
 * fairness contexts for the new line speed.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
1549
1550 void bnx2x__link_status_update(struct bnx2x *bp)
1551 {
1552         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1553                 return;
1554
1555         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1556
1557         if (bp->link_vars.link_up)
1558                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1559         else
1560                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1561
1562         bnx2x_calc_vn_weight_sum(bp);
1563
1564         /* indicate link status */
1565         bnx2x_link_report(bp);
1566 }
1567
/* Take over the Port Management Function (PMF) role for this function:
 * mark ourselves PMF, enable NIG attention for our VN in the HC edge
 * registers, and kick the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	/* 0xff0f = base attention mask; bit (vn + 4) adds our VN's NIG bit */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
1583
1584 /* end of Link */
1585
1586 /* slow path */
1587
1588 /*
1589  * General service functions
1590  */
1591
/* send the MCP a request, block until there is a reply */
/*
 * The command is written to the function's driver mailbox together with
 * an incrementing sequence number; the MCP echoes the sequence number in
 * its firmware mailbox when done. Polls for up to ~5 seconds under
 * fw_mb_mutex. Returns the FW response code, or 0 if the FW failed to
 * respond (after dumping FW state).
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* emulation/FPGA chips need a longer per-poll delay */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
1630
/*
 * Quiesce an E1H function that the MCP disabled (DCC flow).
 * Stops the Tx queues, clears this function's enable bit in the NIG LLH
 * so the chip stops steering traffic to it, and drops the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	/* disable this function in the NIG LLH */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
1641
/*
 * Re-enable an E1H function previously disabled by bnx2x_e1h_disable():
 * re-opens the NIG LLH gate and wakes the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
1656
/*
 * Recompute the rate-shaping / fairness (min-max) configuration after a
 * DCC bandwidth-allocation event.
 *
 * Re-initializes the per-port and per-VN CMNG contexts; if this function
 * is the PMF it also raises the LINK_SYNC general attention towards the
 * other functions on the port and writes the resulting cmng struct into
 * XSTORM internal memory where the firmware reads it.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			/* function number = (vn << 1) | port on E1H */
			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
1690
/*
 * Handle a Dynamic Configuration Change (DCC) event from the MCP.
 *
 * @dcc_event: DRV_STATUS_DCC_* bits read from the function's drv_status.
 * Services the function-disable/enable and bandwidth-allocation events,
 * clearing each handled bit; any bits left unhandled are reported back
 * to the MCP as a DCC failure, otherwise DCC_OK is acknowledged.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
1727
1728 /* must be called under the spq lock */
1729 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1730 {
1731         struct eth_spe *next_spe = bp->spq_prod_bd;
1732
1733         if (bp->spq_prod_bd == bp->spq_last_bd) {
1734                 bp->spq_prod_bd = bp->spq;
1735                 bp->spq_prod_idx = 0;
1736                 DP(NETIF_MSG_TIMER, "end of spq\n");
1737         } else {
1738                 bp->spq_prod_bd++;
1739                 bp->spq_prod_idx++;
1740         }
1741         return next_spe;
1742 }
1743
/* must be called under the spq lock */
/*
 * Publish the new slow-path producer index to the chip (XSTORM internal
 * memory).  The wmb() orders the BD writes before the producer update;
 * mmiowb() orders the MMIO write against subsequent unlocks.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}
1756
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * Post a slow-path (ramrod) entry.
 *
 * @command: SPE command id
 * @cid:     connection id (port number is folded in via HW_CID)
 * @data_hi/@data_lo: 64-bit command data, written big-half first
 * @common:  non-zero to mark the ramrod as a COMMON (non per-connection) one
 *
 * Returns 0 on success, -EBUSY if the SPQ is full (and panics), or -EIO
 * when the driver is already in panic mode.  Takes bp->spq_lock.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
1804
1805 /* acquire split MCP access lock register */
1806 static int bnx2x_acquire_alr(struct bnx2x *bp)
1807 {
1808         u32 j, val;
1809         int rc = 0;
1810
1811         might_sleep();
1812         for (j = 0; j < 1000; j++) {
1813                 val = (1UL << 31);
1814                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1815                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1816                 if (val & (1L << 31))
1817                         break;
1818
1819                 msleep(5);
1820         }
1821         if (!(val & (1L << 31))) {
1822                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1823                 rc = -EBUSY;
1824         }
1825
1826         return rc;
1827 }
1828
/* release split MCP access lock register */
/* Drop the lock taken by bnx2x_acquire_alr() by clearing the register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
1834
/*
 * Snapshot the default status block indices written by the chip.
 *
 * Updates the driver's cached copy of each index and returns a bitmask
 * of which sub-blocks changed:
 *   1  - attention bits index
 *   2  - CSTORM default SB
 *   4  - USTORM default SB
 *   8  - XSTORM default SB
 *   16 - TSTORM default SB
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
1863
1864 /*
1865  * slow path service functions
1866  */
1867
/*
 * Service newly-asserted attention lines.
 *
 * Masks the asserted sources in the AEU (so they cannot re-fire),
 * records them in bp->attn_state, handles the hard-wired sources
 * (NIG/link change, SW timer, GPIOs, general attentions) and finally
 * acks the asserted bits back to the HC.
 *
 * @asserted: bitmask of attention lines that just went up
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* a line should not assert twice without deasserting in between */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted sources under the shared AEU HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			/* service the link change with NIG ints masked */
			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* clear the general attentions owned by this port */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
1963
/*
 * Record and report a board fan failure: mark the external PHY type as
 * FAILURE in shared memory (visible to other drivers/tools across
 * resets) and log an unmissable error for the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact OEM Support for assistance\n");
}
1979
/*
 * Handle deasserted attention group 0: SPIO5 (fan failure), module
 * detect GPIO3 and fatal HW block attentions in set 0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU so it cannot re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	/* module detect interrupt on either function's GPIO3 */
	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal sources, report and stop */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2043
/*
 * Handle deasserted attention group 1: doorbell queue (DORQ) errors and
 * fatal HW block attentions in set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* read-to-clear the DORQ interrupt status */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal sources, report and stop */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2074
/*
 * Handle deasserted attention group 2: CFC and PXP block errors and
 * fatal HW block attentions in set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* read-to-clear the CFC interrupt status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		/* read-to-clear the PXP interrupt status */
		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal sources, report and stop */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2114
/*
 * Handle deasserted attention group 3: general attentions (PMF link
 * sync / DCC events, MC and MCP asserts) and latched attentions
 * (GRC timeout / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the link-sync general attention */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* re-read the MF config and service any DCC event */
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* MCP may hand the PMF role to this function */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* microcode assert - ack all STORM asserts and stop */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert - dump its state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the detail register only exists on E1H */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attentions */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
2169
/*
 * Recovery-flow bookkeeping, kept in a MISC generic-purpose register so
 * it is shared between all driver instances on the chip: the low
 * LOAD_COUNTER_BITS hold the count of loaded drivers, the bits above
 * them act as a "reset in progress" flag (see bnx2x_set_reset_done /
 * bnx2x_set_reset_in_progress / bnx2x_reset_is_done below).
 */
#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
 * should be run under rtnl lock
 */
/* Clear the reset-in-progress flag ("done" == flag bit cleared) */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}
2187
2188 /*
2189  * should be run under rtnl lock
2190  */
2191 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2192 {
2193         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2194         val |= (1 << 16);
2195         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2196         barrier();
2197         mmiowb();
2198 }
2199
2200 /*
2201  * should be run under rtnl lock
2202  */
2203 bool bnx2x_reset_is_done(struct bnx2x *bp)
2204 {
2205         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2206         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2207         return (val & RESET_DONE_FLAG_MASK) ? false : true;
2208 }
2209
2210 /*
2211  * should be run under rtnl lock
2212  */
2213 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2214 {
2215         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2216
2217         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2218
2219         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2220         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2221         barrier();
2222         mmiowb();
2223 }
2224
/*
 * should be run under rtnl lock
 */
/*
 * Decrement the shared driver-load counter and return the new value.
 * NOTE(review): if the counter is already 0 the subtraction wraps to
 * LOAD_COUNTER_MASK - callers presumably guarantee inc/dec pairing;
 * confirm against the load/unload paths.
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}
2241
2242 /*
2243  * should be run under rtnl lock
2244  */
2245 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2246 {
2247         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2248 }
2249
2250 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2251 {
2252         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2254 }
2255
/*
 * Helper for the parity dumps below: append @blk to the current console
 * line, comma-separated after the first entry (@idx != 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
2262
2263 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2264 {
2265         int i = 0;
2266         u32 cur_bit = 0;
2267         for (i = 0; sig; i++) {
2268                 cur_bit = ((u32)0x1 << i);
2269                 if (sig & cur_bit) {
2270                         switch (cur_bit) {
2271                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2272                                 _print_next_block(par_num++, "BRB");
2273                                 break;
2274                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2275                                 _print_next_block(par_num++, "PARSER");
2276                                 break;
2277                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2278                                 _print_next_block(par_num++, "TSDM");
2279                                 break;
2280                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2281                                 _print_next_block(par_num++, "SEARCHER");
2282                                 break;
2283                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2284                                 _print_next_block(par_num++, "TSEMI");
2285                                 break;
2286                         }
2287
2288                         /* Clear the bit */
2289                         sig &= ~cur_bit;
2290                 }
2291         }
2292
2293         return par_num;
2294 }
2295
2296 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2297 {
2298         int i = 0;
2299         u32 cur_bit = 0;
2300         for (i = 0; sig; i++) {
2301                 cur_bit = ((u32)0x1 << i);
2302                 if (sig & cur_bit) {
2303                         switch (cur_bit) {
2304                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2305                                 _print_next_block(par_num++, "PBCLIENT");
2306                                 break;
2307                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2308                                 _print_next_block(par_num++, "QM");
2309                                 break;
2310                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2311                                 _print_next_block(par_num++, "XSDM");
2312                                 break;
2313                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2314                                 _print_next_block(par_num++, "XSEMI");
2315                                 break;
2316                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2317                                 _print_next_block(par_num++, "DOORBELLQ");
2318                                 break;
2319                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2320                                 _print_next_block(par_num++, "VAUX PCI CORE");
2321                                 break;
2322                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2323                                 _print_next_block(par_num++, "DEBUG");
2324                                 break;
2325                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2326                                 _print_next_block(par_num++, "USDM");
2327                                 break;
2328                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2329                                 _print_next_block(par_num++, "USEMI");
2330                                 break;
2331                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2332                                 _print_next_block(par_num++, "UPB");
2333                                 break;
2334                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2335                                 _print_next_block(par_num++, "CSDM");
2336                                 break;
2337                         }
2338
2339                         /* Clear the bit */
2340                         sig &= ~cur_bit;
2341                 }
2342         }
2343
2344         return par_num;
2345 }
2346
2347 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2348 {
2349         int i = 0;
2350         u32 cur_bit = 0;
2351         for (i = 0; sig; i++) {
2352                 cur_bit = ((u32)0x1 << i);
2353                 if (sig & cur_bit) {
2354                         switch (cur_bit) {
2355                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2356                                 _print_next_block(par_num++, "CSEMI");
2357                                 break;
2358                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2359                                 _print_next_block(par_num++, "PXP");
2360                                 break;
2361                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2362                                 _print_next_block(par_num++,
2363                                         "PXPPCICLOCKCLIENT");
2364                                 break;
2365                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2366                                 _print_next_block(par_num++, "CFC");
2367                                 break;
2368                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2369                                 _print_next_block(par_num++, "CDU");
2370                                 break;
2371                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2372                                 _print_next_block(par_num++, "IGU");
2373                                 break;
2374                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2375                                 _print_next_block(par_num++, "MISC");
2376                                 break;
2377                         }
2378
2379                         /* Clear the bit */
2380                         sig &= ~cur_bit;
2381                 }
2382         }
2383
2384         return par_num;
2385 }
2386
2387 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2388 {
2389         int i = 0;
2390         u32 cur_bit = 0;
2391         for (i = 0; sig; i++) {
2392                 cur_bit = ((u32)0x1 << i);
2393                 if (sig & cur_bit) {
2394                         switch (cur_bit) {
2395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2396                                 _print_next_block(par_num++, "MCP ROM");
2397                                 break;
2398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2399                                 _print_next_block(par_num++, "MCP UMP RX");
2400                                 break;
2401                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2402                                 _print_next_block(par_num++, "MCP UMP TX");
2403                                 break;
2404                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2405                                 _print_next_block(par_num++, "MCP SCPAD");
2406                                 break;
2407                         }
2408
2409                         /* Clear the bit */
2410                         sig &= ~cur_bit;
2411                 }
2412         }
2413
2414         return par_num;
2415 }
2416
2417 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2418                                      u32 sig2, u32 sig3)
2419 {
2420         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2421             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2422                 int par_num = 0;
2423                 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
2424                         "[0]:0x%08x [1]:0x%08x "
2425                         "[2]:0x%08x [3]:0x%08x\n",
2426                           sig0 & HW_PRTY_ASSERT_SET_0,
2427                           sig1 & HW_PRTY_ASSERT_SET_1,
2428                           sig2 & HW_PRTY_ASSERT_SET_2,
2429                           sig3 & HW_PRTY_ASSERT_SET_3);
2430                 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
2431                        bp->dev->name);
2432                 par_num = bnx2x_print_blocks_with_parity0(
2433                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2434                 par_num = bnx2x_print_blocks_with_parity1(
2435                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2436                 par_num = bnx2x_print_blocks_with_parity2(
2437                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2438                 par_num = bnx2x_print_blocks_with_parity3(
2439                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2440                 printk("\n");
2441                 return true;
2442         } else
2443                 return false;
2444 }
2445
2446 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2447 {
2448         struct attn_route attn;
2449         int port = BP_PORT(bp);
2450
2451         attn.sig[0] = REG_RD(bp,
2452                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2453                              port*4);
2454         attn.sig[1] = REG_RD(bp,
2455                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2456                              port*4);
2457         attn.sig[2] = REG_RD(bp,
2458                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2459                              port*4);
2460         attn.sig[3] = REG_RD(bp,
2461                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2462                              port*4);
2463
2464         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2465                                         attn.sig[3]);
2466 }
2467
/*
 * Handle newly deasserted attention lines (@deasserted is a bitmask of
 * dynamic attention groups).  Reads the after-invert attention signals,
 * dispatches them to the per-register deasserted handlers for every
 * affected group, acks the bits in the HC and re-enables the lines in
 * the AEU mask.  On a detected parity error the function instead kicks
 * the recovery flow and returns without touching the attentions.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* start the recovery flow from the reset task */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* snapshot the current after-invert attention signals for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* dispatch each deasserted group, masked by that group's routing */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* ack the handled (deasserted) bits in the HC */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a deasserted line must have been marked asserted in attn_state */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted lines in the AEU mask (under HW lock) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
2550
2551 static void bnx2x_attn_int(struct bnx2x *bp)
2552 {
2553         /* read local copy of bits */
2554         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2555                                                                 attn_bits);
2556         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2557                                                                 attn_bits_ack);
2558         u32 attn_state = bp->attn_state;
2559
2560         /* look for changed bits */
2561         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2562         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2563
2564         DP(NETIF_MSG_HW,
2565            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2566            attn_bits, attn_ack, asserted, deasserted);
2567
2568         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2569                 BNX2X_ERR("BAD attention state\n");
2570
2571         /* handle bits that were raised */
2572         if (asserted)
2573                 bnx2x_attn_int_asserted(bp, asserted);
2574
2575         if (deasserted)
2576                 bnx2x_attn_int_deasserted(bp, deasserted);
2577 }
2578
/*
 * Slowpath work handler.  Processes default-status-block events (HW
 * attentions, CStorm STAT_QUERY completions) and then re-acks all storm
 * indices; only the final ack re-enables the IGU interrupt, so the ack
 * order below matters.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status is a bitmask of storms whose index advanced */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & 0x1) {
		bnx2x_attn_int(bp);
		status &= ~0x1;
	}

	/* CStorm events: STAT_QUERY */
	if (status & 0x2) {
		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
		status &= ~0x2;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack each storm index with NOP; the last ack (TSTORM) re-enables
	 * the slowpath interrupt in the IGU
	 */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
2623
/*
 * MSI-X slowpath interrupt handler.  Disables further slowpath
 * interrupts via the IGU, gives CNIC (if registered) a chance to handle
 * its events, and defers the real work to the sp_task workqueue item.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask the slowpath interrupt until sp_task re-enables it */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops is RCU-protected; the handler runs under the lock */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
2657
2658 /* end of slow path */
2659
2660 /* Statistics */
2661
2662 /****************************************************************************
2663 * Macros
2664 ****************************************************************************/
2665
/* sum[hi:lo] += add[hi:lo]; carries into the high word when the low
 * word wraps */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend (64-bit split across two u32
 * halves); clamps the result to 0 when the subtrahend is larger */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* Fold a fresh MAC HW counter snapshot (new->s) into mac_stx:
 * slot [0] keeps the raw latest HW value, slot [1] accumulates the
 * delta since the previous snapshot.  Relies on caller-declared
 * 'new', 'pstats' and 'diff'. */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* Accumulate the delta of a NIG counter (new vs old snapshot) into the
 * matching estats field */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* Extend a 32-bit MAC counter (new->s) into the 64-bit mac_stx[1]
 * accumulator */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* Add the TSTORM client counter delta since the last read into the
 * 64-bit queue stat; updates the old snapshot.  'diff' is declared by
 * the caller. */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* Same as UPDATE_EXTEND_TSTAT but for the USTORM client counters */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* Same as UPDATE_EXTEND_TSTAT but for the XSTORM client counters */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* Subtract the USTORM client counter delta from the 64-bit queue stat
 * (does NOT update the old snapshot, unlike UPDATE_EXTEND_USTAT) */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
2771
2772 /*
2773  * General service functions
2774  */
2775
2776 static inline long bnx2x_hilo(u32 *hiref)
2777 {
2778         u32 lo = *(hiref + 1);
2779 #if (BITS_PER_LONG == 64)
2780         u32 hi = *hiref;
2781
2782         return HILO_U64(hi, lo);
2783 #else
2784         return lo;
2785 #endif
2786 }
2787
2788 /*
2789  * Init service functions
2790  */
2791
2792 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2793 {
2794         if (!bp->stats_pending) {
2795                 struct eth_query_ramrod_data ramrod_data = {0};
2796                 int i, rc;
2797
2798                 ramrod_data.drv_counter = bp->stats_counter++;
2799                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
2800                 for_each_queue(bp, i)
2801                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
2802
2803                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2804                                    ((u32 *)&ramrod_data)[1],
2805                                    ((u32 *)&ramrod_data)[0], 0);
2806                 if (rc == 0) {
2807                         /* stats ramrod has it's own slot on the spq */
2808                         bp->spq_left++;
2809                         bp->stats_pending = 1;
2810                 }
2811         }
2812 }
2813
/*
 * Kick off the prepared statistics DMAE transfers.  When several DMAE
 * commands were queued (bp->executer_idx != 0), a "loader" command is
 * built that copies the prepared command array into the DMAE command
 * memory and chains execution; otherwise a single pre-built function
 * stats command is posted directly.  On emulation/FPGA the completion
 * marker is left set and nothing is posted.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-set the completion value so waiters see "done" even when we
	 * bail out below (slow chips) */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* source: the prepared command array in host memory */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		/* destination: the next slot in the DMAE command memory */
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		/* completion triggers the chained command */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
2861
2862 static int bnx2x_stats_comp(struct bnx2x *bp)
2863 {
2864         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2865         int cnt = 10;
2866
2867         might_sleep();
2868         while (*stats_comp != DMAE_COMP_VAL) {
2869                 if (!cnt) {
2870                         BNX2X_ERR("timeout waiting for stats finished\n");
2871                         break;
2872                 }
2873                 cnt--;
2874                 msleep(1);
2875         }
2876         return 1;
2877 }
2878
2879 /*
2880  * Statistics service functions
2881  */
2882
/*
 * DMAE the port statistics from the shmem port_stx area into the
 * driver's port_stats buffer.  The read is split into two chunks
 * because a single DMAE read is limited to DMAE_LEN32_RD_MAX dwords;
 * the second chunk completes to PCI so bnx2x_stats_comp() can wait on
 * it.  Only valid on an E1H multi-function PMF.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: DMAE_LEN32_RD_MAX dwords, completes to GRC (chains) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completes to PCI so we can poll it */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
2937
/*
 * Build the chained DMAE command list that gathers all port statistics:
 * writes the host port/function stats to shmem for the MCP, reads the
 * MAC (BMAC or EMAC, depending on the active link) HW counters and the
 * NIG counters into host memory.  All commands complete to GRC so they
 * chain via the loader; only the last one completes to PCI so
 * bnx2x_stats_comp() can wait on it.  Requires link up and PMF.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	/* host port stats -> shmem port_stx (when available) */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* host function stats -> shmem func_stx (when available) */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* last command in the chain: completes to PCI so the driver can
	 * poll stats_comp for DMAE_COMP_VAL */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3145
/* Build the single DMAE command that copies this function's statistics
 * block (host_func_stats) from host memory to its per-function area in
 * device memory (bp->func_stx).  The command is stored in bp->stats_dmae
 * and executed later; completion is reported through the stats_comp word,
 * which is cleared here so the poster can wait on it.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity: without a function stats address there is nowhere to copy */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* host (PCI) -> device (GRC) copy; completion written back to host;
	 * endianity swap mode selected at compile time */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	/* destination and length are given in dwords, hence the >> 2 */
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* arm the completion word for the upcoming transfer */
	*stats_comp = 0;
}
3181
3182 static void bnx2x_stats_start(struct bnx2x *bp)
3183 {
3184         if (bp->port.pmf)
3185                 bnx2x_port_stats_init(bp);
3186
3187         else if (bp->func_stx)
3188                 bnx2x_func_stats_init(bp);
3189
3190         bnx2x_hw_stats_post(bp);
3191         bnx2x_storm_stats_post(bp);
3192 }
3193
/* Entry point used when this function takes over as PMF: complete any
 * outstanding stats operation, refresh the PMF-held statistics, then
 * start a fresh collection cycle.  The call order is significant. */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3200
/* Restart statistics collection: wait out the current cycle's
 * completion, then begin a new one. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3206
/* Fold the freshly DMAE'd BigMAC hardware counters into the cumulative
 * port statistics (pstats) and the driver statistics (estats).
 *
 * NOTE(review): the UPDATE_STAT64() macro (defined elsewhere) appears to
 * reference the locals 'new', 'pstats' and 'diff' by name -- 'diff' is
 * otherwise unused here.  Do not rename them without checking the macro.
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;		/* scratch for 64-bit delta inside UPDATE_STAT64() */

	/* map each BMAC hardware counter onto its host_port_stats field */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* the grxpf counter feeds two fields (xoff entered + BMAC xpf) */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	/* likewise gtxpf feeds both xoff-sent and flow-control-done */
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* export pause-frame totals from the updated mac_stx[1] block */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3257
/* Fold the freshly DMAE'd EMAC hardware counters into the cumulative
 * port statistics and the driver statistics.
 *
 * NOTE(review): UPDATE_EXTEND_STAT() (defined elsewhere) appears to
 * reference the locals 'new' and 'pstats' by name; keep their names.
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	/* extend each 32-bit EMAC counter into its 64-bit host field */
	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* pause frames received = XON received + XOFF received */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	/* pause frames sent = XON sent + XOFF sent */
	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3314
/* Process the hardware statistics just transferred by DMAE: dispatch the
 * MAC-specific update, then fold the NIG block counters into the port
 * and driver statistics.  Returns 0 on success, -1 if no MAC is active
 * (which should be unreachable once stats DMAE completed).
 *
 * NOTE(review): 'diff' is presumably used inside UPDATE_STAT64_NIG()
 * (defined elsewhere) -- it is not referenced directly in this body.
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* accumulate the delta of the free-running NIG counters since the
	 * previous snapshot into 64-bit totals */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* snapshot the NIG counters for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* mirror the updated MAC stats block into the driver stats */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* start == end marks the port stats block as consistent */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	if (!BP_NOMCP(bp)) {
		/* report a new NIG timer maximum from shared memory, once */
		u32 nig_timer_max =
			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
		if (nig_timer_max != estats->nig_timer_max) {
			estats->nig_timer_max = nig_timer_max;
			BNX2X_ERR("NIG timer max (%u)\n",
				  estats->nig_timer_max);
		}
	}

	return 0;
}
3367
/* Merge the per-client statistics reported by the storm firmware (fw_stats)
 * into each queue's eth_q_stats, the per-function totals (fstats) and the
 * driver totals (estats).
 *
 * Returns 0 on success.  Returns a negative value when one of the storms
 * has not yet written counters for the current cycle (its stats_counter
 * does not match bp->stats_counter), in which case nothing is committed
 * for the remaining queues and the caller may retry.
 *
 * NOTE(review): the UPDATE_EXTEND_*STAT / SUB_EXTEND_USTAT macros
 * (defined elsewhere) presumably reference the locals 'tclient',
 * 'old_tclient', 'uclient', 'old_uclient', 'xclient', 'old_xclient',
 * 'qstats' and 'diff' by name; keep those names as-is.
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* reset the function totals to their base values (the first two u32s
	 * -- the start marker -- are preserved) before re-accumulating */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid?  each storm stamps its block with a
		 * counter; it must be exactly one behind bp->stats_counter */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* rx bytes = bcast + mcast + ucast bytes ... */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* ... minus bytes dropped for lack of rx buffers */
		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		/* valid bytes = what was actually delivered (before adding
		 * error bytes below) */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		/* total bytes also counts errored bytes */
		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped for lack of buffers are not "received" but
		 * are accounted as no_buff discards */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* tx bytes = ucast + mcast + bcast bytes sent */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* remember raw discard counters for later delta reporting */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* accumulate this queue's counters into the function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and error/discard counters into the driver totals */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC count towards received bytes */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* publish the function totals into the driver stats (skipping the
	 * first two u32s, the start marker) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* port-level tstorm counters are only valid on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* start == end marks the function stats block as consistent */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
3597
/* Translate the driver's eth_stats into the standard Linux
 * net_device_stats exposed through bp->dev->stats.  The 64-bit hi/lo
 * counter pairs are collapsed via bnx2x_hilo(). */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* dropped = MAC discards plus per-queue checksum discards
	 * (stored little-endian in old_tclient) */
	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	/* map hardware error counters onto the standard error categories */
	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* rx_errors sums the categories computed above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
3663
3664 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3665 {
3666         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3667         int i;
3668
3669         estats->driver_xoff = 0;
3670         estats->rx_err_discard_pkt = 0;
3671         estats->rx_skb_alloc_failed = 0;
3672         estats->hw_csum_err = 0;
3673         for_each_queue(bp, i) {
3674                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3675
3676                 estats->driver_xoff += qstats->driver_xoff;
3677                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3678                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3679                 estats->hw_csum_err += qstats->hw_csum_err;
3680         }
3681 }
3682
/* Periodic statistics update: once the previous DMAE transfer has
 * completed, fold in hardware stats (PMF only), storm firmware stats,
 * netdev stats and driver stats, optionally dump per-queue debug info,
 * and finally post the next collection cycle. */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE transfer still in flight -- try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* panic after stats_pending has reached 3 consecutive failed
	 * storm-stats cycles (i.e. this is the 4th failure in a row) */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-queue dump, gated on the 'timer' debug message level */
	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
					  "  tx pkt(%lu) tx calls (%lu)"
					  "  %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	/* kick off the next hardware and firmware stats collection */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
3746
/* Build the final DMAE command(s) that copy the host port statistics
 * (and, when a function statistics address exists, the host function
 * statistics) to the MCP scratchpad while statistics collection is
 * being stopped.  The commands are only prepared in the slow-path
 * buffer here; the caller (bnx2x_stats_stop) posts them via
 * bnx2x_hw_stats_post().
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* start filling the slow-path DMAE command array from scratch */
	bp->executer_idx = 0;

	/* common opcode: copy host (PCI) memory -> chip (GRC); the
	 * completion-destination bit is added per command below */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		/* if a function-stats command follows, complete to GRC so
		 * the DMAE loader chains to the next command; otherwise
		 * this is the last command and completes to host memory */
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			/* completion kicks the loader register of the
			 * following command */
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			/* last command: complete to the stats_comp word */
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* function statistics are always the last command and
		 * complete to the stats_comp word in host memory */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
3810
3811 static void bnx2x_stats_stop(struct bnx2x *bp)
3812 {
3813         int update = 0;
3814
3815         bnx2x_stats_comp(bp);
3816
3817         if (bp->port.pmf)
3818                 update = (bnx2x_hw_stats_update(bp) == 0);
3819
3820         update |= (bnx2x_storm_stats_update(bp) == 0);
3821
3822         if (update) {
3823                 bnx2x_net_stats_update(bp);
3824
3825                 if (bp->port.pmf)
3826                         bnx2x_port_stats_stop(bp);
3827
3828                 bnx2x_hw_stats_post(bp);
3829                 bnx2x_stats_comp(bp);
3830         }
3831 }
3832
/* No-op action for state/event pairs in bnx2x_stats_stm that require
 * no work (the state machine still performs the state transition).
 */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
3836
/* Statistics state machine: indexed by [current state][event]; each
 * entry names the action to execute and the next state to enter.
 * Driven by bnx2x_stats_handle().
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state        event   */
{
/* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
3855
3856 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3857 {
3858         enum bnx2x_stats_state state = bp->stats_state;
3859
3860         if (unlikely(bp->panic))
3861                 return;
3862
3863         bnx2x_stats_stm[state][event].action(bp);
3864         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3865
3866         /* Make sure the state has been "changed" */
3867         smp_wmb();
3868
3869         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
3870                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3871                    state, event, bp->stats_state);
3872 }
3873
/* DMAE the current host port-statistics buffer to the MCP scratchpad
 * once, synchronously, to establish the management baseline.  Must
 * only be called on the PMF with a valid port_stx address (checked).
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI -> GRC copy of host_port_stats, completing to the
	 * stats_comp word in host memory */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	/* post the command and wait for its completion */
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3911
3912 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
3913 {
3914         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
3915         int port = BP_PORT(bp);
3916         int func;
3917         u32 func_stx;
3918
3919         /* sanity */
3920         if (!bp->port.pmf || !bp->func_stx) {
3921                 BNX2X_ERR("BUG!\n");
3922                 return;
3923         }
3924
3925         /* save our func_stx */
3926         func_stx = bp->func_stx;
3927
3928         for (vn = VN_0; vn < vn_max; vn++) {
3929                 func = 2*vn + port;
3930
3931                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
3932                 bnx2x_func_stats_init(bp);
3933                 bnx2x_hw_stats_post(bp);
3934                 bnx2x_stats_comp(bp);
3935         }
3936
3937         /* restore our func_stx */
3938         bp->func_stx = func_stx;
3939 }
3940
/* DMAE the current function statistics from the MCP scratchpad into
 * the host func_stats_base buffer (GRC -> PCI), synchronously.  Used
 * at statistics-init time by non-PMF functions (see bnx2x_stats_init).
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* single GRC -> PCI copy of host_func_stats, completing to the
	 * stats_comp word in host memory */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	/* post the command and wait for its completion */
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3978
/* Reset all statistics state: read the management statistics addresses
 * from shmem, snapshot the NIG counters as the "old" baseline for later
 * delta computation, zero every per-queue and global counter, and set
 * up the management base areas according to the PMF role.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no MCP: no management statistics areas */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: take a baseline snapshot of the NIG counters
	 * (NOTE(review): 0x38/0x50 appear to be per-port register
	 * strides - confirm against the register definitions) */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear every queue's per-storm shadow copies
	 * and its accumulated ethernet queue statistics */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	/* clear the netdev-visible and driver-global counters as well */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* the PMF seeds the management base areas */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* non-PMF: read back our function base from the MCP */
		bnx2x_func_stats_base_update(bp);
}
4040
4041 static void bnx2x_timer(unsigned long data)
4042 {
4043         struct bnx2x *bp = (struct bnx2x *) data;
4044
4045         if (!netif_running(bp->dev))
4046                 return;
4047
4048         if (atomic_read(&bp->intr_sem) != 0)
4049                 goto timer_restart;
4050
4051         if (poll) {
4052                 struct bnx2x_fastpath *fp = &bp->fp[0];
4053                 int rc;
4054
4055                 bnx2x_tx_int(fp);
4056                 rc = bnx2x_rx_int(fp, 1000);
4057         }
4058
4059         if (!BP_NOMCP(bp)) {
4060                 int func = BP_FUNC(bp);
4061                 u32 drv_pulse;
4062                 u32 mcp_pulse;
4063
4064                 ++bp->fw_drv_pulse_wr_seq;
4065                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4066                 /* TBD - add SYSTEM_TIME */
4067                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4068                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4069
4070                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4071                              MCP_PULSE_SEQ_MASK);
4072                 /* The delta between driver pulse and mcp response
4073                  * should be 1 (before mcp response) or 0 (after mcp response)
4074                  */
4075                 if ((drv_pulse != mcp_pulse) &&
4076                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4077                         /* someone lost a heartbeat... */
4078                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4079                                   drv_pulse, mcp_pulse);
4080                 }
4081         }
4082
4083         if (bp->state == BNX2X_STATE_OPEN)
4084                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4085
4086 timer_restart:
4087         mod_timer(&bp->timer, jiffies + bp->current_interval);
4088 }
4089
4090 /* end of Statistics */
4091
4092 /* nic init */
4093
4094 /*
4095  * nic init service functions
4096  */
4097
/* Zero both the USTORM and CSTORM halves of a non-default status block
 * in internal fast memory (both halves live in CSTORM memory).
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4110
/* Initialize a per-queue (non-default) status block: program its host
 * DMA address and owning function for both the USTORM and CSTORM
 * halves, start with host coalescing disabled on every index, and
 * enable the IGU interrupt for this status block.
 */
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* host address of the USTORM section (low/high dwords) */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	/* owning function */
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* 1 = coalescing disabled; bnx2x_update_coalesce() enables it */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	/* host address of the CSTORM section (low/high dwords) */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	/* ack with interrupt enable so the IGU starts delivering */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4155
/* Zero all four storm sections of the default status block in each
 * storm's internal fast memory (both "C" and "U" default sections
 * live in CSTORM memory).
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4173
/* Initialize the default status block: set up the attention section
 * (cached AEU group masks, HC attention address/number), then program
 * the host address, owning function and coalescing-disable flags for
 * the USTORM/CSTORM/TSTORM/XSTORM sections, and finally enable the
 * IGU interrupt for this status block.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	/* cache the four AEU enable signatures of every dynamic
	 * attention group (0x10 apart, four dwords each) */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* tell the HC where to deliver attention messages */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	/* 1 = host coalescing disabled on this index */
	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* reset the slow-path completion bookkeeping */
	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* ack with interrupt enable so the IGU starts delivering */
	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4289
4290 void bnx2x_update_coalesce(struct bnx2x *bp)
4291 {
4292         int port = BP_PORT(bp);
4293         int i;
4294
4295         for_each_queue(bp, i) {
4296                 int sb_id = bp->fp[i].sb_id;
4297
4298                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4299                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4300                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4301                                                       U_SB_ETH_RX_CQ_INDEX),
4302                         bp->rx_ticks/(4 * BNX2X_BTR));
4303                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4304                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4305                                                        U_SB_ETH_RX_CQ_INDEX),
4306                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4307
4308                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4309                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4310                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4311                                                       C_SB_ETH_TX_CQ_INDEX),
4312                         bp->tx_ticks/(4 * BNX2X_BTR));
4313                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4314                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4315                                                        C_SB_ETH_TX_CQ_INDEX),
4316                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4317         }
4318 }
4319
/* Initialize the slow-path queue (SPQ) ring: reset the producer state
 * and tell the XSTORM where the ring lives and its current producer
 * index.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* ring base address (low/high dwords) */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	/* initial producer index */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
4341
/* Fill in the per-connection ethernet context for every queue: the
 * USTORM (RX) section with buffer sizes, ring base addresses and
 * optional TPA/SGE settings, the CDU usage/reserved words, and the
 * CSTORM/XSTORM (TX) section with the TX ring base and statistics id.
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		/* RX BD ring base address (high/low dwords) */
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled: describe the SGE ring as well */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* max SGEs needed for one MTU-sized packet,
			 * rounded up to a whole number of SGE pages */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU validation words for the UCM/XCM aggregation regions */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		/* TX BD ring base address (high/low dwords) */
		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
4417
4418 static void bnx2x_init_ind_table(struct bnx2x *bp)
4419 {
4420         int func = BP_FUNC(bp);
4421         int i;
4422
4423         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4424                 return;
4425
4426         DP(NETIF_MSG_IFUP,
4427            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4428         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4429                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4430                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4431                         bp->fp->cl_id + (i % bp->num_queues));
4432 }
4433
/* Write the per-client TSTORM configuration (MTU, statistics counter
 * id, E1H-outer/VLAN removal flags) for every RX queue client of this
 * port.  The 8-byte config struct is written as two 32-bit words.
 */
void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* strip VLAN tags in HW only when a vlan group is registered
	 * and HW VLAN RX acceleration is on */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		/* each client counts statistics into its own counter */
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
4466
/* Apply the current RX mode (none/normal/allmulti/promisc): program
 * the TSTORM MAC filter accept/drop masks, the NIG LLH BRB mask that
 * decides which management-destined frames are also passed to the
 * host, and (unless RX is fully off) refresh the client config.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast filtering is done by MAC address elsewhere;
		 * only broadcast is unconditionally accepted here */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	/* write the filter config to TSTORM internal memory, one dword
	 * at a time */
	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
4529
4530 static void bnx2x_init_internal_common(struct bnx2x *bp)
4531 {
4532         int i;
4533
4534         /* Zero this manually as its initialization is
4535            currently missing in the initTool */
4536         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4537                 REG_WR(bp, BAR_USTRORM_INTMEM +
4538                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4539 }
4540
/* Per-port internal RAM init: write the BNX2X_BTR value into the HC
 * BTR location of each storm processor (CSTORM U and C paths, TSTORM,
 * XSTORM) for this port.
 * NOTE(review): the exact semantics of BNX2X_BTR are defined by the
 * firmware interface headers — confirm there before changing it.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
4552
/* Per-function internal RAM init.  Programs the TSTORM common config
 * (RSS, TPA, E1H overlay-VLAN-in-CAM), forces rx_mode to NONE until
 * link-up, clears per-client statistics in the X/T/U storms, sets up
 * statistics collection flags and the FW statistics query address,
 * programs CQ ring base/next-page addresses and max aggregation size
 * per queue, configures dropless flow control thresholds (E1H only),
 * and initializes the per-port rate shaping / fairness (cmng) context.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* In E1H multi-function mode the outer VLAN lives in the CAM */
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics areas in each storm */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	/* stats_flags is two 32-bit words; write both to each storm */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* DMA address (lo/hi) of the FW statistics buffer for each storm */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H only: publish the multi-function mode and outer VLAN tag */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* SGE thresholds only matter when TPA is active;
			 * note rx_pause is shared across iterations, so once
			 * set these stay set for subsequent queues too */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}


	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
4765
4766 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4767 {
4768         switch (load_code) {
4769         case FW_MSG_CODE_DRV_LOAD_COMMON:
4770                 bnx2x_init_internal_common(bp);
4771                 /* no break */
4772
4773         case FW_MSG_CODE_DRV_LOAD_PORT:
4774                 bnx2x_init_internal_port(bp);
4775                 /* no break */
4776
4777         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4778                 bnx2x_init_internal_func(bp);
4779                 break;
4780
4781         default:
4782                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4783                 break;
4784         }
4785 }
4786
/* Top-level NIC init after firmware load.  Sets up every fastpath
 * queue's status block and client/SB ids, initializes the default
 * status block, rings, context and internal RAM (per load_code), and
 * finally enables interrupts.  The memory barriers before
 * bnx2x_int_enable() ensure all prior writes are visible before the
 * first interrupt can fire.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		/* SB 0 is reserved for CNIC, so shift fastpath SBs by one */
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 (fan failure attention may already be pending) */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4841
4842 /* end of nic init */
4843
4844 /*
4845  * gzip service functions
4846  */
4847
4848 static int bnx2x_gunzip_init(struct bnx2x *bp)
4849 {
4850         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4851                                             &bp->gunzip_mapping, GFP_KERNEL);
4852         if (bp->gunzip_buf  == NULL)
4853                 goto gunzip_nomem1;
4854
4855         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4856         if (bp->strm  == NULL)
4857                 goto gunzip_nomem2;
4858
4859         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4860                                       GFP_KERNEL);
4861         if (bp->strm->workspace == NULL)
4862                 goto gunzip_nomem3;
4863
4864         return 0;
4865
4866 gunzip_nomem3:
4867         kfree(bp->strm);
4868         bp->strm = NULL;
4869
4870 gunzip_nomem2:
4871         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4872                           bp->gunzip_mapping);
4873         bp->gunzip_buf = NULL;
4874
4875 gunzip_nomem1:
4876         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4877                " un-compression\n");
4878         return -ENOMEM;
4879 }
4880
/* Release the firmware decompression resources acquired by
 * bnx2x_gunzip_init(): the inflate workspace, the zlib stream object
 * and the DMA buffer.  Assumes bp->strm is valid (paired with a
 * successful bnx2x_gunzip_init()).
 */
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
4894
4895 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
4896 {
4897         int n, rc;
4898
4899         /* check gzip header */
4900         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4901                 BNX2X_ERR("Bad gzip header\n");
4902                 return -EINVAL;
4903         }
4904
4905         n = 10;
4906
4907 #define FNAME                           0x8
4908
4909         if (zbuf[3] & FNAME)
4910                 while ((zbuf[n++] != 0) && (n < len));
4911
4912         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
4913         bp->strm->avail_in = len - n;
4914         bp->strm->next_out = bp->gunzip_buf;
4915         bp->strm->avail_out = FW_BUF_SIZE;
4916
4917         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4918         if (rc != Z_OK)
4919                 return rc;
4920
4921         rc = zlib_inflate(bp->strm, Z_FINISH);
4922         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4923                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4924                            bp->strm->msg);
4925
4926         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4927         if (bp->gunzip_outlen & 0x3)
4928                 netdev_err(bp->dev, "Firmware decompression error:"
4929                                     " gunzip_outlen (%d) not aligned\n",
4930                                 bp->gunzip_outlen);
4931         bp->gunzip_outlen >>= 2;
4932
4933         zlib_inflateEnd(bp->strm);
4934
4935         if (rc == Z_STREAM_END)
4936                 return 0;
4937
4938         return rc;
4939 }
4940
4941 /* nic load/unload */
4942
4943 /*
4944  * General service functions
4945  */
4946
4947 /* send a NIG loopback debug packet */
4948 static void bnx2x_lb_pckt(struct bnx2x *bp)
4949 {
4950         u32 wb_write[3];
4951
4952         /* Ethernet source and destination addresses */
4953         wb_write[0] = 0x55555555;
4954         wb_write[1] = 0x55555555;
4955         wb_write[2] = 0x20;             /* SOP */
4956         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4957
4958         /* NON-IP protocol */
4959         wb_write[0] = 0x09000000;
4960         wb_write[1] = 0x55555555;
4961         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4962         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4963 }
4964
4965 /* some of the internal memories
4966  * are not directly readable from the driver
4967  * to test them we send debug packets
4968  */
/* Self-test for internal memories that the driver cannot read directly:
 * send debug loopback packets through the BRB/PRS path and verify the
 * NIG and PRS counters see them.  Returns 0 on success, a negative
 * test-stage code (-1..-4) on failure.  Timeouts are scaled by
 * "factor" for FPGA/emulation platforms.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/*  Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* the parser should now report all 3 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5116
/* Unmask attention interrupts for the HW blocks the driver handles.
 * Writing 0 to a block's INT_MASK register enables all its attention
 * sources; non-zero values keep specific noisy/unused bits masked.
 * The commented-out SEM/MISC masks are intentionally left masked.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5155
/* Per-block parity mask register table, consumed by
 * enable_blocks_parity().  "mask" is written verbatim into the block's
 * PRTY_MASK register: 0 enables all parity attentions for that block,
 * set bits keep the noted sources masked.
 */
static const struct {
	u32 addr;	/* GRC address of the block's parity mask register */
	u32 mask;	/* value to program (set bits stay masked) */
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
5189
5190 static void enable_blocks_parity(struct bnx2x *bp)
5191 {
5192         int i, mask_arr_len =
5193                 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
5194
5195         for (i = 0; i < mask_arr_len; i++)
5196                 REG_WR(bp, bnx2x_parity_mask[i].addr,
5197                         bnx2x_parity_mask[i].mask);
5198 }
5199
5200
/* Assert reset on the common HW blocks by clearing their bits in the
 * MISC reset registers.  The magic masks select which blocks are reset;
 * they are re-set in bnx2x_init_common().
 * NOTE(review): bit meanings come from the MISC_REGISTERS_RESET_REG_*
 * definitions — confirm against the register spec before changing.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5208
5209 static void bnx2x_init_pxp(struct bnx2x *bp)
5210 {
5211         u16 devctl;
5212         int r_order, w_order;
5213
5214         pci_read_config_word(bp->pdev,
5215                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5216         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5217         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5218         if (bp->mrrs == -1)
5219                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5220         else {
5221                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5222                 r_order = bp->mrrs;
5223         }
5224
5225         bnx2x_init_pxp_arb(bp, r_order, w_order);
5226 }
5227
/* Enable fan failure detection via SPIO 5 when the board requires it.
 * Requirement comes from shared HW config: either explicitly enabled,
 * or implied by the external PHY type (SFX7101/BCM8727/BCM8481 boards
 * typically carry a fan).  No-op without MCP or when not required.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
5284
/**
 * bnx2x_init_common - one-time init of the chip-common HW blocks.
 *
 * Called from bnx2x_init_hw() only when the MCP returned
 * FW_MSG_CODE_DRV_LOAD_COMMON (i.e. for the first function loaded).
 * Brings all COMMON-stage blocks out of reset, initializes PXP/PXP2,
 * the storm memories, CFC/CDU, NIG etc., and finally the common PHY.
 *
 * Returns 0 on success, -EBUSY if a HW block fails to report init-done.
 *
 * NOTE: the ordering of the register writes below follows the HW init
 * procedure and must not be rearranged.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];	/* zero pair for the QM pointer-table writes */
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	/* take all blocks out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control; the 30ms delay lets the PLL settle */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* byte-swap the RQ/RD client interfaces on big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init: both done-flags must read back as 1 */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	/* program the 64 QM base addresses and zero the pointer tables */
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode (no iSCSI/FCoE offload) */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the internal memory of all four storm processors */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	/* load random RSS keys while the searcher is held in soft reset */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	/* fixed searcher hash keys for the offload (CNIC) path */
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	/* CDU global params: 4 << 24, 0 << 12, context size 1024 */
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init: poll each done-flag up to 100 * 10us */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test (E1 only, and only on first power-up) */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* these external PHYs require the MDIO HW lock to be taken */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
5558
/**
 * bnx2x_init_port - per-port HW initialization.
 *
 * Called from bnx2x_init_hw() for load codes COMMON and PORT (i.e. for
 * the first function loaded on each physical port).  Runs the PORT-stage
 * init of every block, programs BRB pause thresholds and PBF credits,
 * wires the AEU attention masks, and finally resets the link.
 *
 * Returns 0 (always succeeds; register writes are fire-and-forget here).
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;		/* BRB pause thresholds, in 256-byte units */
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	/* mask NIG interrupts for this port while initializing */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes: pulse the PBF init bit so it picks up new values */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	/* route the external-PHY attention (GPIO3/SPIO5) into AEU group 0 */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
5739
/* ILT (internal lookup table) layout: the 768 lines are split evenly
 * between the two functions; each function's lines start at a fixed base.
 */
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
/* pack a (first, last) ILT line pair into one register value;
 * PXP_ONE_ILT is the degenerate single-line range */
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

/* extra ILT lines consumed by the CNIC (iSCSI/FCoE offload) path */
#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
5758
5759 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5760 {
5761         int reg;
5762
5763         if (CHIP_IS_E1H(bp))
5764                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5765         else /* E1 */
5766                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5767
5768         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5769 }
5770
/**
 * bnx2x_init_func - per-PCI-function HW initialization.
 *
 * Runs for every function load (see bnx2x_init_hw()): enables MSI
 * reconfiguration in the HC, programs this function's ILT lines
 * (context, and with CNIC also timers/QM/searcher tables), runs the
 * FUNC-stage block init on E1H, and clears latched PCIE error state.
 *
 * Returns 0 (always).
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;		/* current ILT line being programmed */

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	/* CDU context table: first line of this function's ILT range */
	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	/* skip past the CDU range, then one ILT line each for the timers
	 * block, the QM queues and the searcher T1 table */
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
5867
/**
 * bnx2x_init_hw - top-level HW init dispatcher.
 * @load_code: FW_MSG_CODE_DRV_LOAD_* response from the MCP, indicating
 *             how much of the chip this driver instance must initialize.
 *
 * The switch below deliberately falls through: COMMON init also runs
 * the PORT and FUNCTION stages, and PORT init also runs FUNCTION.
 * Afterwards the driver-pulse sequence is read from shmem and all
 * status blocks are zeroed (which must happen before gunzip end).
 *
 * Returns 0 on success or a negative errno from a failing stage.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE is unusable until the common stage has cleaned its memory */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* latch the current driver-pulse sequence from shmem */
		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	/* one extra status block for the CNIC, right after the queues */
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
5929
5930 void bnx2x_free_mem(struct bnx2x *bp)
5931 {
5932
5933 #define BNX2X_PCI_FREE(x, y, size) \
5934         do { \
5935                 if (x) { \
5936                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
5937                         x = NULL; \
5938                         y = 0; \
5939                 } \
5940         } while (0)
5941
5942 #define BNX2X_FREE(x) \
5943         do { \
5944                 if (x) { \
5945                         vfree(x); \
5946                         x = NULL; \
5947                 } \
5948         } while (0)
5949
5950         int i;
5951
5952         /* fastpath */
5953         /* Common */
5954         for_each_queue(bp, i) {
5955
5956                 /* status blocks */
5957                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5958                                bnx2x_fp(bp, i, status_blk_mapping),
5959                                sizeof(struct host_status_block));
5960         }
5961         /* Rx */
5962         for_each_queue(bp, i) {
5963
5964                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5965                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5966                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5967                                bnx2x_fp(bp, i, rx_desc_mapping),
5968                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5969
5970                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5971                                bnx2x_fp(bp, i, rx_comp_mapping),
5972                                sizeof(struct eth_fast_path_rx_cqe) *
5973                                NUM_RCQ_BD);
5974
5975                 /* SGE ring */
5976                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5977                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5978                                bnx2x_fp(bp, i, rx_sge_mapping),
5979                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5980         }
5981         /* Tx */
5982         for_each_queue(bp, i) {
5983
5984                 /* fastpath tx rings: tx_buf tx_desc */
5985                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5986                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5987                                bnx2x_fp(bp, i, tx_desc_mapping),
5988                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
5989         }
5990         /* end of fastpath */
5991
5992         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5993                        sizeof(struct host_def_status_block));
5994
5995         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5996                        sizeof(struct bnx2x_slowpath));
5997
5998 #ifdef BCM_CNIC
5999         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6000         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6001         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6002         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6003         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6004                        sizeof(struct host_status_block));
6005 #endif
6006         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6007
6008 #undef BNX2X_PCI_FREE
6009 #undef BNX2X_KFREE
6010 }
6011
/**
 * bnx2x_alloc_mem - allocate all host memory the device needs.
 *
 * Allocates, per queue: a DMA-coherent status block, the Rx
 * descriptor/completion/SGE rings (DMA) plus their software shadow
 * arrays (vmalloc), and the Tx ring pair.  Also allocates the default
 * status block, the slowpath region, the CNIC tables (when built in)
 * and the slow-path (SPQ) ring.
 *
 * On any failure everything already allocated is released via
 * bnx2x_free_mem() and -ENOMEM is returned; returns 0 on success.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent, zeroed allocation; jumps to the unwind label on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* vmalloc'ed, zeroed allocation for the software shadow rings */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	/* chain each 64-byte T2 entry to the physical address of the next */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	/* unwind: bnx2x_free_mem() skips anything still NULL */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6117
6118
6119 /*
6120  * Init service functions
6121  */
6122
6123 /**
6124  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
6125  *
6126  * @param bp driver descriptor
6127  * @param set set or clear an entry (1 or 0)
6128  * @param mac pointer to a buffer containing a MAC
6129  * @param cl_bit_vec bit vector of clients to register a MAC for
6130  * @param cam_offset offset in a CAM to use
6131  * @param with_bcast set broadcast MAC as well
6132  */
6133 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
6134                                       u32 cl_bit_vec, u8 cam_offset,
6135                                       u8 with_bcast)
6136 {
6137         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6138         int port = BP_PORT(bp);
6139
6140         /* CAM allocation
6141          * unicasts 0-31:port0 32-63:port1
6142          * multicast 64-127:port0 128-191:port1
6143          */
6144         config->hdr.length = 1 + (with_bcast ? 1 : 0);
6145         config->hdr.offset = cam_offset;
6146         config->hdr.client_id = 0xff;
6147         config->hdr.reserved1 = 0;
6148
6149         /* primary MAC */
6150         config->config_table[0].cam_entry.msb_mac_addr =
6151                                         swab16(*(u16 *)&mac[0]);
6152         config->config_table[0].cam_entry.middle_mac_addr =
6153                                         swab16(*(u16 *)&mac[2]);
6154         config->config_table[0].cam_entry.lsb_mac_addr =
6155                                         swab16(*(u16 *)&mac[4]);
6156         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6157         if (set)
6158                 config->config_table[0].target_table_entry.flags = 0;
6159         else
6160                 CAM_INVALIDATE(config->config_table[0]);
6161         config->config_table[0].target_table_entry.clients_bit_vector =
6162                                                 cpu_to_le32(cl_bit_vec);
6163         config->config_table[0].target_table_entry.vlan_id = 0;
6164
6165         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6166            (set ? "setting" : "clearing"),
6167            config->config_table[0].cam_entry.msb_mac_addr,
6168            config->config_table[0].cam_entry.middle_mac_addr,
6169            config->config_table[0].cam_entry.lsb_mac_addr);
6170
6171         /* broadcast */
6172         if (with_bcast) {
6173                 config->config_table[1].cam_entry.msb_mac_addr =
6174                         cpu_to_le16(0xffff);
6175                 config->config_table[1].cam_entry.middle_mac_addr =
6176                         cpu_to_le16(0xffff);
6177                 config->config_table[1].cam_entry.lsb_mac_addr =
6178                         cpu_to_le16(0xffff);
6179                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6180                 if (set)
6181                         config->config_table[1].target_table_entry.flags =
6182                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6183                 else
6184                         CAM_INVALIDATE(config->config_table[1]);
6185                 config->config_table[1].target_table_entry.clients_bit_vector =
6186                                                         cpu_to_le32(cl_bit_vec);
6187                 config->config_table[1].target_table_entry.vlan_id = 0;
6188         }
6189
6190         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6191                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6192                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6193 }
6194
6195 /**
6196  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
6197  *
6198  * @param bp driver descriptor
6199  * @param set set or clear an entry (1 or 0)
6200  * @param mac pointer to a buffer containing a MAC
6201  * @param cl_bit_vec bit vector of clients to register a MAC for
6202  * @param cam_offset offset in a CAM to use
6203  */
6204 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
6205                                        u32 cl_bit_vec, u8 cam_offset)
6206 {
6207         struct mac_configuration_cmd_e1h *config =
6208                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6209
6210         config->hdr.length = 1;
6211         config->hdr.offset = cam_offset;
6212         config->hdr.client_id = 0xff;
6213         config->hdr.reserved1 = 0;
6214
6215         /* primary MAC */
6216         config->config_table[0].msb_mac_addr =
6217                                         swab16(*(u16 *)&mac[0]);
6218         config->config_table[0].middle_mac_addr =
6219                                         swab16(*(u16 *)&mac[2]);
6220         config->config_table[0].lsb_mac_addr =
6221                                         swab16(*(u16 *)&mac[4]);
6222         config->config_table[0].clients_bit_vector =
6223                                         cpu_to_le32(cl_bit_vec);
6224         config->config_table[0].vlan_id = 0;
6225         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6226         if (set)
6227                 config->config_table[0].flags = BP_PORT(bp);
6228         else
6229                 config->config_table[0].flags =
6230                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6231
6232         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
6233            (set ? "setting" : "clearing"),
6234            config->config_table[0].msb_mac_addr,
6235            config->config_table[0].middle_mac_addr,
6236            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
6237
6238         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6239                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6240                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6241 }
6242
/**
 * Sleeps/polls until *state_p reaches the expected ramrod-completion state.
 *
 * @param bp driver descriptor
 * @param state value to wait for (written by bnx2x_sp_event())
 * @param idx queue index the ramrod was posted on (0 = leading queue)
 * @param state_p pointer to the state variable being watched
 * @param poll when set, reap completions by polling the Rx rings instead
 *             of relying on interrupts (used on the unload path)
 *
 * @return 0 on success, -EIO if the driver paniced, -EBUSY on timeout
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* abort the wait if the driver declared a panic meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6287
/* Program (or clear) the device's primary Ethernet MAC on E1H and wait
 * for the ramrod completion (set_mac_pending returning to 0).
 */
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	/* mark a MAC operation in flight before posting the ramrod; the
	 * barrier orders the increment ahead of the firmware command */
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	/* NOTE(review): polling mode is used only when clearing (set == 0),
	 * presumably because clearing happens on the down path where
	 * interrupts may already be disabled -- confirm */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
6299
/* Program (or clear) the device's primary Ethernet MAC on E1, including
 * the broadcast entry, and wait for the ramrod completion.
 */
void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	/* mark a MAC operation in flight before posting the ramrod; the
	 * barrier orders the increment ahead of the firmware command */
	bp->set_mac_pending++;
	smp_wmb();

	/* E1 CAM: unicast entries start at 0 for port0, 32 for port1;
	 * the final argument requests the broadcast entry as well */
	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion (poll only on the clear/down path) */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}
6312
6313 #ifdef BCM_CNIC
6314 /**
6315  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6316  * MAC(s). This function will wait until the ramdord completion
6317  * returns.
6318  *
6319  * @param bp driver handle
6320  * @param set set or clear the CAM entry
6321  *
6322  * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6323  */
6324 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6325 {
6326         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
6327
6328         bp->set_mac_pending++;
6329         smp_wmb();
6330
6331         /* Send a SET_MAC ramrod */
6332         if (CHIP_IS_E1(bp))
6333                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
6334                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
6335                                   1);
6336         else
6337                 /* CAM allocation for E1H
6338                 * unicasts: by func number
6339                 * multicast: 20+FUNC*20, 20 each
6340                 */
6341                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
6342                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
6343
6344         /* Wait for a completion when setting */
6345         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
6346
6347         return 0;
6348 }
6349 #endif
6350
6351 int bnx2x_setup_leading(struct bnx2x *bp)
6352 {
6353         int rc;
6354
6355         /* reset IGU state */
6356         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6357
6358         /* SETUP ramrod */
6359         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6360
6361         /* Wait for completion */
6362         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6363
6364         return rc;
6365 }
6366
/* Bring up a non-leading queue: re-enable its IGU status block, post a
 * CLIENT_SETUP ramrod and wait until the fastpath state becomes OPEN.
 *
 * Returns 0 on success, negative errno on timeout/panic.
 */
int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
6383
6384
6385 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
6386 {
6387
6388         switch (bp->multi_mode) {
6389         case ETH_RSS_MODE_DISABLED:
6390                 bp->num_queues = 1;
6391                 break;
6392
6393         case ETH_RSS_MODE_REGULAR:
6394                 if (num_queues)
6395                         bp->num_queues = min_t(u32, num_queues,
6396                                                   BNX2X_MAX_QUEUES(bp));
6397                 else
6398                         bp->num_queues = min_t(u32, num_online_cpus(),
6399                                                   BNX2X_MAX_QUEUES(bp));
6400                 break;
6401
6402
6403         default:
6404                 bp->num_queues = 1;
6405                 break;
6406         }
6407 }
6408
6409
6410
/* Tear down a non-leading queue with the firmware's two-step protocol:
 * HALT the client, then delete its CFC entry.  Each step is waited for
 * in polling mode (interrupts may be going away on the unload path).
 *
 * Returns 0 on success, negative errno on timeout.
 */
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}
6434
6435 static int bnx2x_stop_leading(struct bnx2x *bp)
6436 {
6437         __le16 dsb_sp_prod_idx;
6438         /* if the other port is handling traffic,
6439            this can take a lot of time */
6440         int cnt = 500;
6441         int rc;
6442
6443         might_sleep();
6444
6445         /* Send HALT ramrod */
6446         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6447         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
6448
6449         /* Wait for completion */
6450         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6451                                &(bp->fp[0].state), 1);
6452         if (rc) /* timeout */
6453                 return rc;
6454
6455         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6456
6457         /* Send PORT_DELETE ramrod */
6458         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6459
6460         /* Wait for completion to arrive on default status block
6461            we are going to reset the chip anyway
6462            so there is not much to do if this times out
6463          */
6464         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6465                 if (!cnt) {
6466                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6467                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6468                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6469 #ifdef BNX2X_STOP_ON_ERROR
6470                         bnx2x_panic();
6471 #endif
6472                         rc = -EBUSY;
6473                         break;
6474                 }
6475                 cnt--;
6476                 msleep(1);
6477                 rmb(); /* Refresh the dsb_sp_prod */
6478         }
6479         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6480         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6481
6482         return rc;
6483 }
6484
/* Tear down per-function chip state: mask the function's IGU edge
 * registers, (with CNIC) stop the iSCSI timer scan and wait for it to
 * drain, then clear this function's ILT entries.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}
6513
/* Tear down per-port chip state: mask NIG interrupts, stop packet flow
 * into the BRB, mask the port's AEU attentions, then verify the BRB has
 * drained (a non-empty BRB is only reported, not treated as fatal).
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain out of the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
6539
6540 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6541 {
6542         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6543            BP_FUNC(bp), reset_code);
6544
6545         switch (reset_code) {
6546         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6547                 bnx2x_reset_port(bp);
6548                 bnx2x_reset_func(bp);
6549                 bnx2x_reset_common(bp);
6550                 break;
6551
6552         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6553                 bnx2x_reset_port(bp);
6554                 bnx2x_reset_func(bp);
6555                 break;
6556
6557         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6558                 bnx2x_reset_func(bp);
6559                 break;
6560
6561         default:
6562                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6563                 break;
6564         }
6565 }
6566
6567 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6568 {
6569         int port = BP_PORT(bp);
6570         u32 reset_code = 0;
6571         int i, cnt, rc;
6572
6573         /* Wait until tx fastpath tasks complete */
6574         for_each_queue(bp, i) {
6575                 struct bnx2x_fastpath *fp = &bp->fp[i];
6576
6577                 cnt = 1000;
6578                 while (bnx2x_has_tx_work_unload(fp)) {
6579
6580                         bnx2x_tx_int(fp);
6581                         if (!cnt) {
6582                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6583                                           i);
6584 #ifdef BNX2X_STOP_ON_ERROR
6585                                 bnx2x_panic();
6586                                 return -EBUSY;
6587 #else
6588                                 break;
6589 #endif
6590                         }
6591                         cnt--;
6592                         msleep(1);
6593                 }
6594         }
6595         /* Give HW time to discard old tx messages */
6596         msleep(1);
6597
6598         if (CHIP_IS_E1(bp)) {
6599                 struct mac_configuration_cmd *config =
6600                                                 bnx2x_sp(bp, mcast_config);
6601
6602                 bnx2x_set_eth_mac_addr_e1(bp, 0);
6603
6604                 for (i = 0; i < config->hdr.length; i++)
6605                         CAM_INVALIDATE(config->config_table[i]);
6606
6607                 config->hdr.length = i;
6608                 if (CHIP_REV_IS_SLOW(bp))
6609                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6610                 else
6611                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6612                 config->hdr.client_id = bp->fp->cl_id;
6613                 config->hdr.reserved1 = 0;
6614
6615                 bp->set_mac_pending++;
6616                 smp_wmb();
6617
6618                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6619                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6620                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6621
6622         } else { /* E1H */
6623                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6624
6625                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
6626
6627                 for (i = 0; i < MC_HASH_SIZE; i++)
6628                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6629
6630                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6631         }
6632 #ifdef BCM_CNIC
6633         /* Clear iSCSI L2 MAC */
6634         mutex_lock(&bp->cnic_mutex);
6635         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6636                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6637                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6638         }
6639         mutex_unlock(&bp->cnic_mutex);
6640 #endif
6641
6642         if (unload_mode == UNLOAD_NORMAL)
6643                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6644
6645         else if (bp->flags & NO_WOL_FLAG)
6646                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6647
6648         else if (bp->wol) {
6649                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6650                 u8 *mac_addr = bp->dev->dev_addr;
6651                 u32 val;
6652                 /* The mac address is written to entries 1-4 to
6653                    preserve entry 0 which is used by the PMF */
6654                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6655
6656                 val = (mac_addr[0] << 8) | mac_addr[1];
6657                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6658
6659                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6660                       (mac_addr[4] << 8) | mac_addr[5];
6661                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6662
6663                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6664
6665         } else
6666                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667
6668         /* Close multi and leading connections
6669            Completions for ramrods are collected in a synchronous way */
6670         for_each_nondefault_queue(bp, i)
6671                 if (bnx2x_stop_multi(bp, i))
6672                         goto unload_error;
6673
6674         rc = bnx2x_stop_leading(bp);
6675         if (rc) {
6676                 BNX2X_ERR("Stop leading failed!\n");
6677 #ifdef BNX2X_STOP_ON_ERROR
6678                 return -EBUSY;
6679 #else
6680                 goto unload_error;
6681 #endif
6682         }
6683
6684 unload_error:
6685         if (!BP_NOMCP(bp))
6686                 reset_code = bnx2x_fw_command(bp, reset_code);
6687         else {
6688                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
6689                    load_count[0], load_count[1], load_count[2]);
6690                 load_count[0]--;
6691                 load_count[1 + port]--;
6692                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
6693                    load_count[0], load_count[1], load_count[2]);
6694                 if (load_count[0] == 0)
6695                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6696                 else if (load_count[1 + port] == 0)
6697                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6698                 else
6699                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6700         }
6701
6702         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6703             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6704                 bnx2x__link_reset(bp);
6705
6706         /* Reset the chip */
6707         bnx2x_reset_chip(bp, reset_code);
6708
6709         /* Report UNLOAD_DONE to MCP */
6710         if (!BP_NOMCP(bp))
6711                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6712
6713 }
6714
6715 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
6716 {
6717         u32 val;
6718
6719         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6720
6721         if (CHIP_IS_E1(bp)) {
6722                 int port = BP_PORT(bp);
6723                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6724                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
6725
6726                 val = REG_RD(bp, addr);
6727                 val &= ~(0x300);
6728                 REG_WR(bp, addr, val);
6729         } else if (CHIP_IS_E1H(bp)) {
6730                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6731                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6732                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6733                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6734         }
6735 }
6736
6737
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		/* bit 0 set => doorbells discarded (gate closed) */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	/* NOTE(review): polarity here is inverted relative to #2/#4 --
	 * bit 0 in HC_REG_CONFIG appears to mean "enabled/open", hence
	 * the !close; confirm against the HC register spec */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
		close ? "closing" : "opening");
	mmiowb();
}
6764
6765 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
6766
/* Prepare for CLP reset: save the current SHARED_MF_CLP_MAGIC bit of the
 * shared MF CLP mailbox into *magic_val, then set the bit (restored later
 * by bnx2x_clp_reset_done()).
 */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}
6774
/* Restore the value of the `magic' bit.
 *
 * @param bp driver descriptor
 * @param magic_val Old value of the `magic' bit, as previously saved by
 *                  bnx2x_clp_reset_prep().
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value: clear the bit in the mailbox,
	 * then OR back whatever it was before the reset prep */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
6790
/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp driver descriptor
 * @param magic_val Old value of 'magic' bit (out; saved so that
 *                  bnx2x_reset_mcp_comp() can restore it afterwards).
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	/* shmem == 0 means no shared memory region was published, so
	 * there is nothing to invalidate */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
6815
6816 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
6817 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
6818
6819 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
6820  * depending on the HW type.
6821  *
6822  * @param bp
6823  */
6824 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6825 {
6826         /* special handling for emulation and FPGA,
6827            wait 10 times longer */
6828         if (CHIP_REV_IS_SLOW(bp))
6829                 msleep(MCP_ONE_TIMEOUT*10);
6830         else
6831                 msleep(MCP_ONE_TIMEOUT);
6832 }
6833
/* Completes an MCP reset: polls the shmem validity map until the MCP
 * republishes its signature (i.e. has come back up), then restores the
 * CLP `magic' bit saved by bnx2x_reset_mcp_prep().
 *
 * @param bp driver descriptor
 * @param magic_val old value of the `magic' bit to restore
 *
 * @return 0 on success, -ENOTTY if shmem is unset or the MCP never
 *         came back within MCP_TIMEOUT
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a head start before polling */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
6885
/* Clears the PXP2 read-path start/done indications ahead of a chip reset
 * so the block can be cleanly re-initialized afterwards.
 *
 * NOTE(review): skipped on E1 -- presumably these registers do not exist
 * or do not apply on that chip; confirm against the PXP2 register spec.
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}
6895
6896 /*
6897  * Reset the whole chip except for:
6898  *      - PCIE core
6899  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6900  *              one reset bit)
6901  *      - IGU
6902  *      - MISC (including AEU)
6903  *      - GRC
6904  *      - RBCN, RBCP
6905  */
6906 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6907 {
6908         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
6909
6910         not_reset_mask1 =
6911                 MISC_REGISTERS_RESET_REG_1_RST_HC |
6912                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6913                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
6914
6915         not_reset_mask2 =
6916                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6917                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6918                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6919                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6920                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6921                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
6922                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6923                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6924
6925         reset_mask1 = 0xffffffff;
6926
6927         if (CHIP_IS_E1(bp))
6928                 reset_mask2 = 0xffff;
6929         else
6930                 reset_mask2 = 0x1ffff;
6931
6932         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6933                reset_mask1 & (~not_reset_mask1));
6934         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6935                reset_mask2 & (~not_reset_mask2));
6936
6937         barrier();
6938         mmiowb();
6939
6940         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6941         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
6942         mmiowb();
6943 }
6944
/* Perform the "process kill" recovery flow: drain outstanding PCI/GRC
 * traffic, close gates #2-#4, reset the whole chip (except the blocks
 * listed above bnx2x_process_kill_chip_reset()) and wait for the MCP to
 * come back up.
 *
 * Returns 0 on success, -EAGAIN if the Tetris buffer did not drain
 * within ~1s or if the MCP did not recover after the reset.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
        int cnt = 1000;
        u32 val = 0;
        u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


        /* Empty the Tetris buffer, wait for 1s */
        do {
                sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
                blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
                port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
                port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
                pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
                /* NOTE(review): 0x7e / 0xa0 are presumably the "fully
                 * drained" counter values - confirm against the PXP2
                 * register spec.
                 */
                if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
                    ((port_is_idle_0 & 0x1) == 0x1) &&
                    ((port_is_idle_1 & 0x1) == 0x1) &&
                    (pgl_exp_rom2 == 0xffffffff))
                        break;
                msleep(1);
        } while (cnt-- > 0);

        if (cnt <= 0) {
                DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
                          " are still"
                          " outstanding read requests after 1s!\n");
                DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
                          " port_is_idle_0=0x%08x,"
                          " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
                          sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
                          pgl_exp_rom2);
                return -EAGAIN;
        }

        barrier();

        /* Close gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, true);

        /* TBD: Indicate that "process kill" is in progress to MCP */

        /* Clear "unprepared" bit */
        REG_WR(bp, MISC_REG_UNPREPARED, 0);
        barrier();

        /* Make sure all is written to the chip before the reset */
        mmiowb();

        /* Wait for 1ms to empty GLUE and PCI-E core queues,
         * PSWHST, GRC and PSWRD Tetris buffer.
         */
        msleep(1);

        /* Prepare to chip reset: */
        /* MCP - the saved magic value in 'val' is consumed by
         * bnx2x_reset_mcp_comp() after the reset below
         */
        bnx2x_reset_mcp_prep(bp, &val);

        /* PXP */
        bnx2x_pxp_prep(bp);
        barrier();

        /* reset the chip */
        bnx2x_process_kill_chip_reset(bp);
        barrier();

        /* Recover after reset: */
        /* MCP */
        if (bnx2x_reset_mcp_comp(bp, val))
                return -EAGAIN;

        /* PXP */
        bnx2x_pxp_prep(bp);

        /* Open the gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, false);

        /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
         * reset state, re-enable attentions. */

        return 0;
}
7026
7027 static int bnx2x_leader_reset(struct bnx2x *bp)
7028 {
7029         int rc = 0;
7030         /* Try to recover after the failure */
7031         if (bnx2x_process_kill(bp)) {
7032                 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7033                        bp->dev->name);
7034                 rc = -EAGAIN;
7035                 goto exit_leader_reset;
7036         }
7037
7038         /* Clear "reset is in progress" bit and update the driver state */
7039         bnx2x_set_reset_done(bp);
7040         bp->recovery_state = BNX2X_RECOVERY_DONE;
7041
7042 exit_leader_reset:
7043         bp->is_leader = 0;
7044         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7045         smp_wmb();
7046         return rc;
7047 }
7048
7049 /* Assumption: runs under rtnl lock. This together with the fact
7050  * that it's called only from bnx2x_reset_task() ensure that it
7051  * will never be called when netif_running(bp->dev) is false.
7052  */
/* Parity-error recovery state machine.
 *
 * Re-entered repeatedly via the reset task until recovery completes.
 * Exactly one function becomes the "leader" by grabbing the RESERVED_08
 * HW lock; only the leader resets the chip, while every other function
 * unloads itself and waits.  Each 'return' exits the state machine (it
 * may be re-scheduled via bp->reset_task); 'break' re-evaluates the
 * switch with the (possibly updated) state.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
        DP(NETIF_MSG_HW, "Handling parity\n");
        while (1) {
                switch (bp->recovery_state) {
                case BNX2X_RECOVERY_INIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
                        /* Try to get a LEADER_LOCK HW lock */
                        if (bnx2x_trylock_hw_lock(bp,
                                HW_LOCK_RESOURCE_RESERVED_08))
                                bp->is_leader = 1;

                        /* Stop the driver */
                        /* If interface has been removed - break */
                        if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
                                return;

                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
                        /* Ensure "is_leader" and "recovery_state"
                         *  update values are seen on other CPUs
                         */
                        smp_wmb();
                        /* loop again - next iteration handles
                         * BNX2X_RECOVERY_WAIT
                         */
                        break;

                case BNX2X_RECOVERY_WAIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
                        if (bp->is_leader) {
                                u32 load_counter = bnx2x_get_load_cnt(bp);
                                if (load_counter) {
                                        /* Wait until all other functions get
                                         * down.
                                         */
                                        schedule_delayed_work(&bp->reset_task,
                                                                HZ/10);
                                        return;
                                } else {
                                        /* If all other functions got down -
                                         * try to bring the chip back to
                                         * normal. In any case it's an exit
                                         * point for a leader.
                                         */
                                        if (bnx2x_leader_reset(bp) ||
                                        bnx2x_nic_load(bp, LOAD_NORMAL)) {
                                                printk(KERN_ERR"%s: Recovery "
                                                "has failed. Power cycle is "
                                                "needed.\n", bp->dev->name);
                                                /* Disconnect this device */
                                                netif_device_detach(bp->dev);
                                                /* Block ifup for all function
                                                 * of this ASIC until
                                                 * "process kill" or power
                                                 * cycle.
                                                 */
                                                bnx2x_set_reset_in_progress(bp);
                                                /* Shut down the power */
                                                bnx2x_set_power_state(bp,
                                                                PCI_D3hot);
                                                return;
                                        }

                                        return;
                                }
                        } else { /* non-leader */
                                if (!bnx2x_reset_is_done(bp)) {
                                        /* Try to get a LEADER_LOCK HW lock as
                                         * long as a former leader may have
                                         * been unloaded by the user or
                                         * released a leadership by another
                                         * reason.
                                         */
                                        if (bnx2x_trylock_hw_lock(bp,
                                            HW_LOCK_RESOURCE_RESERVED_08)) {
                                                /* I'm a leader now! Restart a
                                                 * switch case.
                                                 */
                                                bp->is_leader = 1;
                                                break;
                                        }

                                        /* reset still in progress - poll
                                         * again in 100ms
                                         */
                                        schedule_delayed_work(&bp->reset_task,
                                                                HZ/10);
                                        return;

                                } else { /* A leader has completed
                                          * the "process kill". It's an exit
                                          * point for a non-leader.
                                          */
                                        bnx2x_nic_load(bp, LOAD_NORMAL);
                                        bp->recovery_state =
                                                BNX2X_RECOVERY_DONE;
                                        smp_wmb();
                                        return;
                                }
                        }
                default:
                        return;
                }
        }
}
7152
7153 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a deadlock.
7155  */
7156 static void bnx2x_reset_task(struct work_struct *work)
7157 {
7158         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
7159
7160 #ifdef BNX2X_STOP_ON_ERROR
7161         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7162                   " so reset not done to allow debug dump,\n"
7163          KERN_ERR " you will need to reboot when done\n");
7164         return;
7165 #endif
7166
7167         rtnl_lock();
7168
7169         if (!netif_running(bp->dev))
7170                 goto reset_task_exit;
7171
7172         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7173                 bnx2x_parity_recover(bp);
7174         else {
7175                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7176                 bnx2x_nic_load(bp, LOAD_NORMAL);
7177         }
7178
7179 reset_task_exit:
7180         rtnl_unlock();
7181 }
7182
7183 /* end of nic load/unload */
7184
7185 /* ethtool_ops */
7186
7187 /*
7188  * Init service functions
7189  */
7190
7191 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7192 {
7193         switch (func) {
7194         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7195         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7196         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7197         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7198         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7199         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7200         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7201         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7202         default:
7203                 BNX2X_ERR("Unsupported function index: %d\n", func);
7204                 return (u32)(-1);
7205         }
7206 }
7207
7208 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7209 {
7210         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7211
7212         /* Flush all outstanding writes */
7213         mmiowb();
7214
7215         /* Pretend to be function 0 */
7216         REG_WR(bp, reg, 0);
7217         /* Flush the GRC transaction (in the chip) */
7218         new_val = REG_RD(bp, reg);
7219         if (new_val != 0) {
7220                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7221                           new_val);
7222                 BUG();
7223         }
7224
7225         /* From now we are in the "like-E1" mode */
7226         bnx2x_int_disable(bp);
7227
7228         /* Flush all outstanding writes */
7229         mmiowb();
7230
7231         /* Restore the original funtion settings */
7232         REG_WR(bp, reg, orig_func);
7233         new_val = REG_RD(bp, reg);
7234         if (new_val != orig_func) {
7235                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7236                           orig_func, new_val);
7237                 BUG();
7238         }
7239 }
7240
/* Chip-family dispatch for disabling interrupts during UNDI takeover:
 * E1 can disable directly, E1H needs the function-pretend dance.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
        if (!CHIP_IS_E1H(bp))
                bnx2x_int_disable(bp);
        else
                bnx2x_undi_int_disable_e1h(bp, func);
}
7248
/* Probe-time takeover from a pre-boot (UNDI) driver.
 *
 * If MISC_REG_UNPREPARED shows the chip is already initialized and the
 * doorbell CID offset carries the UNDI signature (0x7), this function
 * unloads the UNDI driver through the MCP mailbox (for both ports if
 * needed), disables interrupts, blocks incoming traffic and attentions,
 * resets the chip while preserving the NIG port-swap straps, and
 * finally restores this function's identity and fw_seq.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
        u32 val;

        /* Check if there is any driver already loaded */
        val = REG_RD(bp, MISC_REG_UNPREPARED);
        if (val == 0x1) {
                /* Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
                val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                if (val == 0x7) {
                        u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
                        /* save our func */
                        int func = BP_FUNC(bp);
                        u32 swap_en;
                        u32 swap_val;

                        /* clear the UNDI indication */
                        REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

                        BNX2X_DEV_INFO("UNDI is active! reset device\n");

                        /* try unload UNDI on port 0 */
                        bp->func = 0;
                        /* fw_seq must track the MCP's mailbox sequence for
                         * the function we are impersonating
                         */
                        bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
                        reset_code = bnx2x_fw_command(bp, reset_code);

                        /* if UNDI is loaded on the other port */
                        if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

                                /* send "DONE" for previous unload */
                                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

                                /* unload UNDI on port 1 */
                                bp->func = 1;
                                bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                                        DRV_MSG_SEQ_NUMBER_MASK);
                                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

                                bnx2x_fw_command(bp, reset_code);
                        }

                        /* now it's safe to release the lock */
                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

                        bnx2x_undi_int_disable(bp, func);

                        /* close input traffic and wait for it */
                        /* Do not rcv packets to BRB */
                        REG_WR(bp,
                              (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
                                             NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
                        /* Do not direct rcv packets that are not for MCP to
                         * the BRB */
                        REG_WR(bp,
                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
                                              NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
                        /* clear AEU */
                        REG_WR(bp,
                             (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                                            MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
                        msleep(10);

                        /* save NIG port swap info */
                        swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
                        swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
                        /* reset device
                         * NOTE(review): 0xd3ffffff / 0x1403 are
                         * hand-picked reset masks - confirm against the
                         * MISC reset register spec before changing.
                         */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
                               0xd3ffffff);
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
                               0x1403);
                        /* take the NIG out of reset and restore swap values */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
                               MISC_REGISTERS_RESET_REG_1_RST_NIG);
                        REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
                        REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

                        /* send unload done to the MCP */
                        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

                        /* restore our func and fw_seq */
                        bp->func = func;
                        bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);

                } else
                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
        }
}
7347
/* Read chip-common (non-per-port) configuration at probe time: chip id,
 * single/dual-port detection, flash size, shmem bases, MCP validity,
 * bootcode (BC) version, WoL capability and the board part number.
 * Sets NO_MCP_FLAG and returns early when the shmem base is outside the
 * valid window, i.e. the MCP never initialized it.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
        u32 val, val2, val3, val4, id;
        u16 pmc;

        /* Get the chip revision id and number. */
        /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
        val = REG_RD(bp, MISC_REG_CHIP_NUM);
        id = ((val & 0xffff) << 16);
        val = REG_RD(bp, MISC_REG_CHIP_REV);
        id |= ((val & 0xf) << 12);
        val = REG_RD(bp, MISC_REG_CHIP_METAL);
        id |= ((val & 0xff) << 4);
        val = REG_RD(bp, MISC_REG_BOND_ID);
        id |= (val & 0xf);
        bp->common.chip_id = id;
        bp->link_params.chip_id = bp->common.chip_id;
        BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

        /* NOTE(review): 0x2874 appears to be a strap register used for
         * single-port detection - confirm against the register spec.
         */
        val = (REG_RD(bp, 0x2874) & 0x55);
        if ((bp->common.chip_id & 0x1) ||
            (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
                bp->flags |= ONE_PORT_FLAG;
                BNX2X_DEV_INFO("single port device\n");
        }

        val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
        bp->common.flash_size = (NVRAM_1MB_SIZE <<
                                 (val & MCPR_NVM_CFG4_FLASH_SIZE));
        BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
                       bp->common.flash_size, bp->common.flash_size);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
                       bp->common.shmem_base, bp->common.shmem2_base);

        /* shmem base outside [0xA0000, 0xC0000) means the MCP never set
         * it up - run without MCP support
         */
        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERROR("BAD MCP validity signature\n");

        bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
        BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

        bp->link_params.hw_led_mode = ((bp->common.hw_config &
                                        SHARED_HW_CFG_LED_MODE_MASK) >>
                                       SHARED_HW_CFG_LED_MODE_SHIFT);

        bp->link_params.feature_config_flags = 0;
        val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
        if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
                bp->link_params.feature_config_flags |=
                                FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
        else
                bp->link_params.feature_config_flags &=
                                ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

        /* bc_rev >> 8 keeps the upper 24 bits as the BC version */
        val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
        bp->common.bc_ver = val;
        BNX2X_DEV_INFO("bc_ver %X\n", val);
        if (val < BNX2X_BC_VER) {
                /* for now only warn
                 * later we might need to enforce this */
                BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
                            "please upgrade BC\n", BNX2X_BC_VER, val);
        }
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
                FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

        if (BP_E1HVN(bp) == 0) {
                pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
                bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
        } else {
                /* no WOL capability for E1HVN != 0 */
                bp->flags |= NO_WOL_FLAG;
        }
        BNX2X_DEV_INFO("%sWoL capable\n",
                       (bp->flags & NO_WOL_FLAG) ? "not " : "");

        /* part number read in four 32-bit chunks (byte offsets 0/4/8/12) */
        val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
        val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
        val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
        val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

        dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
                 val, val2, val3, val4);
}
7446
7447 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7448                                                     u32 switch_cfg)
7449 {
7450         int port = BP_PORT(bp);
7451         u32 ext_phy_type;
7452
7453         switch (switch_cfg) {
7454         case SWITCH_CFG_1G:
7455                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7456
7457                 ext_phy_type =
7458                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7459                 switch (ext_phy_type) {
7460                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7461                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7462                                        ext_phy_type);
7463
7464                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7465                                                SUPPORTED_10baseT_Full |
7466                                                SUPPORTED_100baseT_Half |
7467                                                SUPPORTED_100baseT_Full |
7468                                                SUPPORTED_1000baseT_Full |
7469                                                SUPPORTED_2500baseX_Full |
7470                                                SUPPORTED_TP |
7471                                                SUPPORTED_FIBRE |
7472                                                SUPPORTED_Autoneg |
7473                                                SUPPORTED_Pause |
7474                                                SUPPORTED_Asym_Pause);
7475                         break;
7476
7477                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7478                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7479                                        ext_phy_type);
7480
7481                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7482                                                SUPPORTED_10baseT_Full |
7483                                                SUPPORTED_100baseT_Half |
7484                                                SUPPORTED_100baseT_Full |
7485                                                SUPPORTED_1000baseT_Full |
7486                                                SUPPORTED_TP |
7487                                                SUPPORTED_FIBRE |
7488                                                SUPPORTED_Autoneg |
7489                                                SUPPORTED_Pause |
7490                                                SUPPORTED_Asym_Pause);
7491                         break;
7492
7493                 default:
7494                         BNX2X_ERR("NVRAM config error. "
7495                                   "BAD SerDes ext_phy_config 0x%x\n",
7496                                   bp->link_params.ext_phy_config);
7497                         return;
7498                 }
7499
7500                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7501                                            port*0x10);
7502                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7503                 break;
7504
7505         case SWITCH_CFG_10G:
7506                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7507
7508                 ext_phy_type =
7509                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7510                 switch (ext_phy_type) {
7511                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7512                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7513                                        ext_phy_type);
7514
7515                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7516                                                SUPPORTED_10baseT_Full |
7517                                                SUPPORTED_100baseT_Half |
7518                                                SUPPORTED_100baseT_Full |
7519                                                SUPPORTED_1000baseT_Full |
7520                                                SUPPORTED_2500baseX_Full |
7521                                                SUPPORTED_10000baseT_Full |
7522                                                SUPPORTED_TP |
7523                                                SUPPORTED_FIBRE |
7524                                                SUPPORTED_Autoneg |
7525                                                SUPPORTED_Pause |
7526                                                SUPPORTED_Asym_Pause);
7527                         break;
7528
7529                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7530                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7531                                        ext_phy_type);
7532
7533                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7534                                                SUPPORTED_1000baseT_Full |
7535                                                SUPPORTED_FIBRE |
7536                                                SUPPORTED_Autoneg |
7537                                                SUPPORTED_Pause |
7538                                                SUPPORTED_Asym_Pause);
7539                         break;
7540
7541                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7542                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7543                                        ext_phy_type);
7544
7545                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7546                                                SUPPORTED_2500baseX_Full |
7547                                                SUPPORTED_1000baseT_Full |
7548                                                SUPPORTED_FIBRE |
7549                                                SUPPORTED_Autoneg |
7550                                                SUPPORTED_Pause |
7551                                                SUPPORTED_Asym_Pause);
7552                         break;
7553
7554                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7555                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7556                                        ext_phy_type);
7557
7558                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7559                                                SUPPORTED_FIBRE |
7560                                                SUPPORTED_Pause |
7561                                                SUPPORTED_Asym_Pause);
7562                         break;
7563
7564                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7565                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7566                                        ext_phy_type);
7567
7568                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7569                                                SUPPORTED_1000baseT_Full |
7570                                                SUPPORTED_FIBRE |
7571                                                SUPPORTED_Pause |
7572                                                SUPPORTED_Asym_Pause);
7573                         break;
7574
7575                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7576                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7577                                        ext_phy_type);
7578
7579                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7580                                                SUPPORTED_1000baseT_Full |
7581                                                SUPPORTED_Autoneg |
7582                                                SUPPORTED_FIBRE |
7583                                                SUPPORTED_Pause |
7584                                                SUPPORTED_Asym_Pause);
7585                         break;
7586
7587                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7588                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7589                                        ext_phy_type);
7590
7591                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7592                                                SUPPORTED_1000baseT_Full |
7593                                                SUPPORTED_Autoneg |
7594                                                SUPPORTED_FIBRE |
7595                                                SUPPORTED_Pause |
7596                                                SUPPORTED_Asym_Pause);
7597                         break;
7598
7599                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7600                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7601                                        ext_phy_type);
7602
7603                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7604                                                SUPPORTED_TP |
7605                                                SUPPORTED_Autoneg |
7606                                                SUPPORTED_Pause |
7607                                                SUPPORTED_Asym_Pause);
7608                         break;
7609
7610                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7611                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7612                                        ext_phy_type);
7613
7614                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7615                                                SUPPORTED_10baseT_Full |
7616                                                SUPPORTED_100baseT_Half |
7617                                                SUPPORTED_100baseT_Full |
7618                                                SUPPORTED_1000baseT_Full |
7619                                                SUPPORTED_10000baseT_Full |
7620                                                SUPPORTED_TP |
7621                                                SUPPORTED_Autoneg |
7622                                                SUPPORTED_Pause |
7623                                                SUPPORTED_Asym_Pause);
7624                         break;
7625
7626                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7627                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7628                                   bp->link_params.ext_phy_config);
7629                         break;
7630
7631                 default:
7632                         BNX2X_ERR("NVRAM config error. "
7633                                   "BAD XGXS ext_phy_config 0x%x\n",
7634                                   bp->link_params.ext_phy_config);
7635                         return;
7636                 }
7637
7638                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7639                                            port*0x18);
7640                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7641
7642                 break;
7643
7644         default:
7645                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7646                           bp->port.link_config);
7647                 return;
7648         }
7649         bp->link_params.phy_addr = bp->port.phy_addr;
7650
7651         /* mask what we support according to speed_cap_mask */
7652         if (!(bp->link_params.speed_cap_mask &
7653                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7654                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7655
7656         if (!(bp->link_params.speed_cap_mask &
7657                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7658                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7659
7660         if (!(bp->link_params.speed_cap_mask &
7661                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7662                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7663
7664         if (!(bp->link_params.speed_cap_mask &
7665                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7666                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7667
7668         if (!(bp->link_params.speed_cap_mask &
7669                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7670                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7671                                         SUPPORTED_1000baseT_Full);
7672
7673         if (!(bp->link_params.speed_cap_mask &
7674                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7675                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7676
7677         if (!(bp->link_params.speed_cap_mask &
7678                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7679                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7680
7681         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7682 }
7683
/*
 * bnx2x_link_settings_requested - translate the port's NVRAM link_config
 * into the driver's requested link parameters.
 *
 * Reads the speed selection and flow-control bits from bp->port.link_config
 * and fills in bp->link_params.req_line_speed / req_duplex / req_flow_ctrl
 * plus the bp->port.advertising mask, constrained by the bp->port.supported
 * mask computed earlier (see bnx2x_link_settings_supported).
 *
 * On an invalid NVRAM combination (a forced speed that is not in the
 * supported mask) the function logs an error and returns early, leaving the
 * fields set so far unchanged.  An unrecognized speed selection falls back
 * to autoneg with everything supported advertised.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
        /* default; the half-duplex cases below override this */
        bp->link_params.req_duplex = DUPLEX_FULL;

        switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
        case PORT_FEATURE_LINK_SPEED_AUTO:
                if (bp->port.supported & SUPPORTED_Autoneg) {
                        bp->link_params.req_line_speed = SPEED_AUTO_NEG;
                        bp->port.advertising = bp->port.supported;
                } else {
                        u32 ext_phy_type =
                            XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

                        /* BCM8705/8706 fiber PHYs cannot autonegotiate;
                         * treat "auto" as forced 10G for them */
                        if ((ext_phy_type ==
                             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
                            (ext_phy_type ==
                             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
                                /* force 10G, no AN */
                                bp->link_params.req_line_speed = SPEED_10000;
                                bp->port.advertising =
                                                (ADVERTISED_10000baseT_Full |
                                                 ADVERTISED_FIBRE);
                                break;
                        }
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  Autoneg not supported\n",
                                  bp->port.link_config);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_10M_FULL:
                if (bp->port.supported & SUPPORTED_10baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_10;
                        bp->port.advertising = (ADVERTISED_10baseT_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_10M_HALF:
                if (bp->port.supported & SUPPORTED_10baseT_Half) {
                        bp->link_params.req_line_speed = SPEED_10;
                        bp->link_params.req_duplex = DUPLEX_HALF;
                        bp->port.advertising = (ADVERTISED_10baseT_Half |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_100M_FULL:
                if (bp->port.supported & SUPPORTED_100baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_100;
                        bp->port.advertising = (ADVERTISED_100baseT_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_100M_HALF:
                if (bp->port.supported & SUPPORTED_100baseT_Half) {
                        bp->link_params.req_line_speed = SPEED_100;
                        bp->link_params.req_duplex = DUPLEX_HALF;
                        bp->port.advertising = (ADVERTISED_100baseT_Half |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_1G:
                if (bp->port.supported & SUPPORTED_1000baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_1000;
                        bp->port.advertising = (ADVERTISED_1000baseT_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_2_5G:
                if (bp->port.supported & SUPPORTED_2500baseX_Full) {
                        bp->link_params.req_line_speed = SPEED_2500;
                        bp->port.advertising = (ADVERTISED_2500baseX_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_10G_CX4:
        case PORT_FEATURE_LINK_SPEED_10G_KX4:
        case PORT_FEATURE_LINK_SPEED_10G_KR:
                if (bp->port.supported & SUPPORTED_10000baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_10000;
                        bp->port.advertising = (ADVERTISED_10000baseT_Full |
                                                ADVERTISED_FIBRE);
                } else {
                        BNX2X_ERROR("NVRAM config error. "
                                    "Invalid link_config 0x%x"
                                    "  speed_cap_mask 0x%x\n",
                                    bp->port.link_config,
                                    bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        default:
                /* unknown speed selection: complain but keep the link usable
                 * by falling back to autoneg */
                BNX2X_ERROR("NVRAM config error. "
                            "BAD link speed link_config 0x%x\n",
                            bp->port.link_config);
                bp->link_params.req_line_speed = SPEED_AUTO_NEG;
                bp->port.advertising = bp->port.supported;
                break;
        }

        /* flow control comes straight from NVRAM, except that "auto" is
         * meaningless without autoneg support and is downgraded to none */
        bp->link_params.req_flow_ctrl = (bp->port.link_config &
                                         PORT_FEATURE_FLOW_CONTROL_MASK);
        if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
            !(bp->port.supported & SUPPORTED_Autoneg))
                bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

        BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
                       "  advertising 0x%x\n",
                       bp->link_params.req_line_speed,
                       bp->link_params.req_duplex,
                       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
7846
7847 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7848 {
7849         mac_hi = cpu_to_be16(mac_hi);
7850         mac_lo = cpu_to_be32(mac_lo);
7851         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7852         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7853 }
7854
/*
 * bnx2x_get_port_hwinfo - read per-port hardware configuration from shmem.
 *
 * Populates bp->link_params (lane config, external PHY config, speed
 * capability mask) and bp->port.link_config from the shared-memory port
 * records, derives the supported/requested link settings, selects the MDIO
 * PHY address for the ethtool/mdio interface, and extracts the port MAC
 * address (plus the iSCSI MAC when CNIC support is built in).
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 val, val2;
        u32 config;
        u16 i;
        u32 ext_phy_type;

        bp->link_params.bp = bp;
        bp->link_params.port = port;

        bp->link_params.lane_config =
                SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
        bp->link_params.ext_phy_config =
                SHMEM_RD(bp,
                         dev_info.port_hw_config[port].external_phy_config);
        /* BCM8727_NOC => BCM8727 no over current: treat it as a plain
         * BCM8727 and remember the no-over-current variant via a feature
         * flag */
        if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
                bp->link_params.ext_phy_config &=
                        ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
                bp->link_params.ext_phy_config |=
                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
                bp->link_params.feature_config_flags |=
                        FEATURE_CONFIG_BCM8727_NOC;
        }

        bp->link_params.speed_cap_mask =
                SHMEM_RD(bp,
                         dev_info.port_hw_config[port].speed_capability_mask);

        bp->port.link_config =
                SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

        /* Get the 4 lanes xgxs config rx and tx: each 32-bit shmem word
         * packs two 16-bit per-lane values (high half = even lane,
         * low half = odd lane) */
        for (i = 0; i < 2; i++) {
                val = SHMEM_RD(bp,
                           dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
                bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
                bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

                val = SHMEM_RD(bp,
                           dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
                bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
                bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
        }

        /* If the device is capable of WoL, set the default state according
         * to the HW
         */
        config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
        bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
                   (config & PORT_FEATURE_WOL_ENABLED));

        BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
                       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
                       bp->link_params.lane_config,
                       bp->link_params.ext_phy_config,
                       bp->link_params.speed_cap_mask, bp->port.link_config);

        bp->link_params.switch_cfg |= (bp->port.link_config &
                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
        bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

        bnx2x_link_settings_requested(bp);

        /*
         * If connected directly, work with the internal PHY, otherwise, work
         * with the external PHY
         */
        ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
        if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
                bp->mdio.prtad = bp->link_params.phy_addr;

        else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
                 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
                bp->mdio.prtad =
                        XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

        /* port MAC address: upper 16 bits + lower 32 bits from shmem,
         * copied into netdev, link params and the permanent address */
        val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
        val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
        val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
        val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
        bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
7946
/*
 * bnx2x_get_hwinfo - gather device configuration from shmem/MCP.
 *
 * Reads the common (chip-wide) info, then, on E1H chips with a working MCP,
 * determines whether the device runs in multi-function mode (bp->e1hmf) and
 * reads this function's outer-VLAN tag (bp->e1hov).  Also pulls the port
 * hw info, the firmware mailbox sequence number, and the per-function MAC
 * override in multi-function mode.  Falls back to a random MAC when no MCP
 * is present (emulation/FPGA only).
 *
 * Returns 0 on success or -EPERM on inconsistent multi-function config.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        u32 val, val2;
        int rc = 0;

        bnx2x_get_common_hwinfo(bp);

        bp->e1hov = 0;
        bp->e1hmf = 0;
        if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
                bp->mf_config =
                        SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

                /* function 0's ov-tag being non-default marks the whole
                 * device as multi-function */
                val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
                       FUNC_MF_CFG_E1HOV_TAG_MASK);
                if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
                        bp->e1hmf = 1;
                BNX2X_DEV_INFO("%s function mode\n",
                               IS_E1HMF(bp) ? "multi" : "single");

                if (IS_E1HMF(bp)) {
                        /* in MF mode every function must have a valid tag */
                        val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
                                                                e1hov_tag) &
                               FUNC_MF_CFG_E1HOV_TAG_MASK);
                        if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
                                bp->e1hov = val;
                                BNX2X_DEV_INFO("E1HOV for func %d is %d "
                                               "(0x%04x)\n",
                                               func, bp->e1hov, bp->e1hov);
                        } else {
                                BNX2X_ERROR("No valid E1HOV for func %d,"
                                            "  aborting\n", func);
                                rc = -EPERM;
                        }
                } else {
                        /* single-function mode only makes sense on VN 0 */
                        if (BP_E1HVN(bp)) {
                                BNX2X_ERROR("VN %d in single function mode,"
                                            "  aborting\n", BP_E1HVN(bp));
                                rc = -EPERM;
                        }
                }
        }

        if (!BP_NOMCP(bp)) {
                bnx2x_get_port_hwinfo(bp);

                bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
                              DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }

        if (IS_E1HMF(bp)) {
                /* per-function MAC overrides the port MAC when both halves
                 * are programmed (non-default) */
                val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
                val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
                if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
                    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
                        bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
                        bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
                        bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
                        bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
                        bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
                        bp->dev->dev_addr[5] = (u8)(val & 0xff);
                        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
                               ETH_ALEN);
                        memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
                               ETH_ALEN);
                }

                return rc;
        }

        if (BP_NOMCP(bp)) {
                /* only supposed to happen on emulation/FPGA */
                BNX2X_ERROR("warning: random MAC workaround active\n");
                random_ether_addr(bp->dev->dev_addr);
                memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
        }

        return rc;
}
8028
/*
 * bnx2x_read_fwinfo - extract an OEM firmware-version string from PCI VPD.
 *
 * Reads the VPD area, locates the read-only (RO) large-resource section,
 * and if the manufacturer-ID keyword matches the Dell vendor ID (in either
 * case form), copies the vendor-specific V0 keyword into bp->fw_ver.
 * bp->fw_ver is cleared first, so any parse failure simply leaves it empty;
 * the function never reports an error.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
        int cnt, i, block_end, rodi;
        char vpd_data[BNX2X_VPD_LEN+1];
        char str_id_reg[VENDOR_ID_LEN+1];
        char str_id_cap[VENDOR_ID_LEN+1];
        u8 len;

        cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
        memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

        /* short read (or negative error code) - give up */
        if (cnt < BNX2X_VPD_LEN)
                goto out_not_found;

        i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
                             PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;


        block_end = i + PCI_VPD_LRDT_TAG_SIZE +
                    pci_vpd_lrdt_size(&vpd_data[i]);

        i += PCI_VPD_LRDT_TAG_SIZE;

        /* declared RO section must fit inside the data we actually read */
        if (block_end > BNX2X_VPD_LEN)
                goto out_not_found;

        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                   PCI_VPD_RO_KEYWORD_MFR_ID);
        if (rodi < 0)
                goto out_not_found;

        len = pci_vpd_info_field_size(&vpd_data[rodi]);

        if (len != VENDOR_ID_LEN)
                goto out_not_found;

        rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

        /* vendor specific info: compare against both "%04x" and "%04X"
         * renderings of the Dell vendor ID */
        snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
        snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
        if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
            !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

                rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                                PCI_VPD_RO_KEYWORD_VENDOR0);
                if (rodi >= 0) {
                        len = pci_vpd_info_field_size(&vpd_data[rodi]);

                        rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

                        /* NOTE(review): assumes bp->fw_ver holds at least
                         * len + 1 bytes (len < 32 plus trailing ' '); at
                         * len == 31 the space lands in the last byte and the
                         * buffer is not NUL-terminated if fw_ver is exactly
                         * 32 bytes - verify against bnx2x.h */
                        if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
                                memcpy(bp->fw_ver, &vpd_data[rodi], len);
                                bp->fw_ver[len] = ' ';
                        }
                }
                return;
        }
out_not_found:
        return;
}
8092
/*
 * bnx2x_init_bp - one-time driver-state initialization during probe.
 *
 * Sets up locks, work items, hw/fw info, and the module-parameter-derived
 * settings (multi-queue mode, interrupt mode, TPA/LRO, dropless flow
 * control, ring sizes, coalescing, poll timer).  Interrupt handling stays
 * disabled (intr_sem held) until the HW is initialized later.
 *
 * Returns the status of bnx2x_get_hwinfo(); note the rc is captured early
 * and returned at the end - initialization continues even if hwinfo
 * reading reported an error.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int timer_interval;
        int rc;

        /* Disable interrupt handling until HW is initialized */
        atomic_set(&bp->intr_sem, 1);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
        mutex_init(&bp->cnic_mutex);
#endif

        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

        rc = bnx2x_get_hwinfo(bp);

        bnx2x_read_fwinfo(bp);
        /* need to reset chip if undi was active */
        if (!BP_NOMCP(bp))
                bnx2x_undi_unload(bp);

        if (CHIP_REV_IS_FPGA(bp))
                dev_err(&bp->pdev->dev, "FPGA detected\n");

        if (BP_NOMCP(bp) && (func == 0))
                dev_err(&bp->pdev->dev, "MCP disabled, "
                                        "must load devices in order!\n");

        /* Set multi queue mode: RSS requires MSI-X, so downgrade the
         * request when the chosen interrupt mode is INTx or MSI */
        if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
            ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
                dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
                                        "requested is not MSI-X\n");
                multi_mode = ETH_RSS_MODE_DISABLED;
        }
        bp->multi_mode = multi_mode;
        bp->int_mode = int_mode;

        bp->dev->features |= NETIF_F_GRO;

        /* Set TPA flags */
        if (disable_tpa) {
                bp->flags &= ~TPA_ENABLE_FLAG;
                bp->dev->features &= ~NETIF_F_LRO;
        } else {
                bp->flags |= TPA_ENABLE_FLAG;
                bp->dev->features |= NETIF_F_LRO;
        }
        bp->disable_tpa = disable_tpa;

        /* dropless flow control is not available on E1 chips */
        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
        else
                bp->dropless_fc = dropless_fc;

        bp->mrrs = mrrs;

        bp->tx_ring_size = MAX_TX_AVAIL;
        bp->rx_ring_size = MAX_RX_AVAIL;

        bp->rx_csum = 1;

        /* make sure that the numbers are in the right granularity:
         * round the coalescing ticks down to a multiple of 4*BNX2X_BTR */
        bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
        bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

        /* slow (emulation/FPGA) chips get a longer default timer period;
         * the "poll" module parameter overrides it entirely */
        timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
        bp->current_interval = (poll ? poll : timer_interval);

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + bp->current_interval;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = bnx2x_timer;

        return rc;
}
8174
8175 /*
8176  * ethtool service functions
8177  */
8178
8179 /* All ethtool functions called with rtnl_lock */
8180
/*
 * bnx2x_get_settings - ethtool .get_settings handler.
 *
 * Fills *cmd with the current link state: supported/advertising masks,
 * speed and duplex (reported as -1, i.e. "unknown", when the link is down
 * or the function is disabled), the port type derived from the external
 * PHY, the MDIO address, and the autoneg setting.  In multi-function mode
 * the reported speed is capped at this function's configured max bandwidth.
 * Called with rtnl_lock held; always returns 0.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2x *bp = netdev_priv(dev);

        cmd->supported = bp->port.supported;
        cmd->advertising = bp->port.advertising;

        if ((bp->state == BNX2X_STATE_OPEN) &&
            !(bp->flags & MF_FUNC_DIS) &&
            (bp->link_vars.link_up)) {
                cmd->speed = bp->link_vars.line_speed;
                cmd->duplex = bp->link_vars.duplex;
                if (IS_E1HMF(bp)) {
                        u16 vn_max_rate;

                        /* max BW is stored in units of 100 Mbps */
                        vn_max_rate =
                                ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                                FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
                        if (vn_max_rate < cmd->speed)
                                cmd->speed = vn_max_rate;
                }
        } else {
                /* link down: -1 wraps to the all-ones "unknown" value in
                 * the unsigned ethtool fields */
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
                u32 ext_phy_type =
                        XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

                /* map the external PHY family to fibre vs twisted pair */
                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
                        cmd->port = PORT_FIBRE;
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
                        cmd->port = PORT_TP;
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
                        BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
                                  bp->link_params.ext_phy_config);
                        break;

                default:
                        DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
                           bp->link_params.ext_phy_config);
                        break;
                }
        } else
                cmd->port = PORT_TP;

        cmd->phy_address = bp->mdio.prtad;
        cmd->transceiver = XCVR_INTERNAL;

        if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
                cmd->autoneg = AUTONEG_ENABLE;
        else
                cmd->autoneg = AUTONEG_DISABLE;

        /* coalescing is not reported through this interface */
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;

        DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
           DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
           DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
           DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
           cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
           cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
           cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

        return 0;
}
8261
/* ethtool_ops .set_settings handler: apply the speed/duplex/autoneg
 * configuration requested by userspace and, if the interface is up,
 * restart the link so the new parameters take effect.
 * Returns 0 on success or -EINVAL for unsupported combinations.
 */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	/* in E1H multi-function mode the link is owned by the management
	 * FW; silently accept and ignore the request */
	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		/* NOTE(review): |= accumulates advertised bits across
		 * successive calls instead of replacing them - confirm
		 * this is intentional (vs '=') */
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			/* 1G and above are full-duplex only */
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			/* 10G is advertised as fibre, not twisted-pair */
			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	/* apply immediately if the NIC is up */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8412
/* true when a dump-register table entry is readable on E1 / E1H chips */
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8415
8416 static int bnx2x_get_regs_len(struct net_device *dev)
8417 {
8418         struct bnx2x *bp = netdev_priv(dev);
8419         int regdump_len = 0;
8420         int i;
8421
8422         if (CHIP_IS_E1(bp)) {
8423                 for (i = 0; i < REGS_COUNT; i++)
8424                         if (IS_E1_ONLINE(reg_addrs[i].info))
8425                                 regdump_len += reg_addrs[i].size;
8426
8427                 for (i = 0; i < WREGS_COUNT_E1; i++)
8428                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8429                                 regdump_len += wreg_addrs_e1[i].size *
8430                                         (1 + wreg_addrs_e1[i].read_regs_count);
8431
8432         } else { /* E1H */
8433                 for (i = 0; i < REGS_COUNT; i++)
8434                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8435                                 regdump_len += reg_addrs[i].size;
8436
8437                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8438                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8439                                 regdump_len += wreg_addrs_e1h[i].size *
8440                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8441         }
8442         regdump_len *= 4;
8443         regdump_len += sizeof(struct dump_hdr);
8444
8445         return regdump_len;
8446 }
8447
/* ethtool_ops .get_regs handler: dump all chip registers readable on
 * the current chip revision into the user buffer.
 * Layout: a struct dump_hdr followed by raw dword register values.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	/* registers can only be read while the device is up */
	if (!netif_running(bp->dev))
		return;

	/* hdr_size is in dwords, excluding the first one */
	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	/* dump every register range marked readable for this chip */
	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
8487
8488 #define PHY_FW_VER_LEN                  10
8489
8490 static void bnx2x_get_drvinfo(struct net_device *dev,
8491                               struct ethtool_drvinfo *info)
8492 {
8493         struct bnx2x *bp = netdev_priv(dev);
8494         u8 phy_fw_ver[PHY_FW_VER_LEN];
8495
8496         strcpy(info->driver, DRV_MODULE_NAME);
8497         strcpy(info->version, DRV_MODULE_VERSION);
8498
8499         phy_fw_ver[0] = '\0';
8500         if (bp->port.pmf) {
8501                 bnx2x_acquire_phy_lock(bp);
8502                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8503                                              (bp->state != BNX2X_STATE_CLOSED),
8504                                              phy_fw_ver, PHY_FW_VER_LEN);
8505                 bnx2x_release_phy_lock(bp);
8506         }
8507
8508         strncpy(info->fw_version, bp->fw_ver, 32);
8509         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
8510                  "bc %d.%d.%d%s%s",
8511                  (bp->common.bc_ver & 0xff0000) >> 16,
8512                  (bp->common.bc_ver & 0xff00) >> 8,
8513                  (bp->common.bc_ver & 0xff),
8514                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
8515         strcpy(info->bus_info, pci_name(bp->pdev));
8516         info->n_stats = BNX2X_NUM_STATS;
8517         info->testinfo_len = BNX2X_NUM_TESTS;
8518         info->eedump_len = bp->common.flash_size;
8519         info->regdump_len = bnx2x_get_regs_len(dev);
8520 }
8521
8522 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8523 {
8524         struct bnx2x *bp = netdev_priv(dev);
8525
8526         if (bp->flags & NO_WOL_FLAG) {
8527                 wol->supported = 0;
8528                 wol->wolopts = 0;
8529         } else {
8530                 wol->supported = WAKE_MAGIC;
8531                 if (bp->wol)
8532                         wol->wolopts = WAKE_MAGIC;
8533                 else
8534                         wol->wolopts = 0;
8535         }
8536         memset(&wol->sopass, 0, sizeof(wol->sopass));
8537 }
8538
8539 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8540 {
8541         struct bnx2x *bp = netdev_priv(dev);
8542
8543         if (wol->wolopts & ~WAKE_MAGIC)
8544                 return -EINVAL;
8545
8546         if (wol->wolopts & WAKE_MAGIC) {
8547                 if (bp->flags & NO_WOL_FLAG)
8548                         return -EINVAL;
8549
8550                 bp->wol = 1;
8551         } else
8552                 bp->wol = 0;
8553
8554         return 0;
8555 }
8556
8557 static u32 bnx2x_get_msglevel(struct net_device *dev)
8558 {
8559         struct bnx2x *bp = netdev_priv(dev);
8560
8561         return bp->msg_enable;
8562 }
8563
8564 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8565 {
8566         struct bnx2x *bp = netdev_priv(dev);
8567
8568         if (capable(CAP_NET_ADMIN))
8569                 bp->msg_enable = level;
8570 }
8571
8572 static int bnx2x_nway_reset(struct net_device *dev)
8573 {
8574         struct bnx2x *bp = netdev_priv(dev);
8575
8576         if (!bp->port.pmf)
8577                 return 0;
8578
8579         if (netif_running(dev)) {
8580                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8581                 bnx2x_link_set(bp);
8582         }
8583
8584         return 0;
8585 }
8586
8587 static u32 bnx2x_get_link(struct net_device *dev)
8588 {
8589         struct bnx2x *bp = netdev_priv(dev);
8590
8591         if (bp->flags & MF_FUNC_DIS)
8592                 return 0;
8593
8594         return bp->link_vars.link_up;
8595 }
8596
8597 static int bnx2x_get_eeprom_len(struct net_device *dev)
8598 {
8599         struct bnx2x *bp = netdev_priv(dev);
8600
8601         return bp->common.flash_size;
8602 }
8603
8604 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8605 {
8606         int port = BP_PORT(bp);
8607         int count, i;
8608         u32 val = 0;
8609
8610         /* adjust timeout for emulation/FPGA */
8611         count = NVRAM_TIMEOUT_COUNT;
8612         if (CHIP_REV_IS_SLOW(bp))
8613                 count *= 100;
8614
8615         /* request access to nvram interface */
8616         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8617                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8618
8619         for (i = 0; i < count*10; i++) {
8620                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8621                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8622                         break;
8623
8624                 udelay(5);
8625         }
8626
8627         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8628                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8629                 return -EBUSY;
8630         }
8631
8632         return 0;
8633 }
8634
8635 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8636 {
8637         int port = BP_PORT(bp);
8638         int count, i;
8639         u32 val = 0;
8640
8641         /* adjust timeout for emulation/FPGA */
8642         count = NVRAM_TIMEOUT_COUNT;
8643         if (CHIP_REV_IS_SLOW(bp))
8644                 count *= 100;
8645
8646         /* relinquish nvram interface */
8647         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8648                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8649
8650         for (i = 0; i < count*10; i++) {
8651                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8652                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8653                         break;
8654
8655                 udelay(5);
8656         }
8657
8658         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8659                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8660                 return -EBUSY;
8661         }
8662
8663         return 0;
8664 }
8665
8666 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8667 {
8668         u32 val;
8669
8670         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8671
8672         /* enable both bits, even on read */
8673         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8674                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8675                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8676 }
8677
8678 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8679 {
8680         u32 val;
8681
8682         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8683
8684         /* disable both bits, even after read */
8685         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8686                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8687                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8688 }
8689
/* Read one dword from NVRAM at @offset through the MCP NVM interface.
 * @cmd_flags carries the MCPR_NVM_COMMAND_FIRST/LAST sequencing bits.
 * On success *ret_val holds the value in big-endian (ethtool byte-array)
 * order and 0 is returned; -EBUSY is returned on timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion (DONE bit), polling every 5us */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
8734
8735 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8736                             int buf_size)
8737 {
8738         int rc;
8739         u32 cmd_flags;
8740         __be32 val;
8741
8742         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8743                 DP(BNX2X_MSG_NVM,
8744                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8745                    offset, buf_size);
8746                 return -EINVAL;
8747         }
8748
8749         if (offset + buf_size > bp->common.flash_size) {
8750                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8751                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8752                    offset, buf_size, bp->common.flash_size);
8753                 return -EINVAL;
8754         }
8755
8756         /* request access to nvram interface */
8757         rc = bnx2x_acquire_nvram_lock(bp);
8758         if (rc)
8759                 return rc;
8760
8761         /* enable access to nvram interface */
8762         bnx2x_enable_nvram_access(bp);
8763
8764         /* read the first word(s) */
8765         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8766         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8767                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8768                 memcpy(ret_buf, &val, 4);
8769
8770                 /* advance to the next dword */
8771                 offset += sizeof(u32);
8772                 ret_buf += sizeof(u32);
8773                 buf_size -= sizeof(u32);
8774                 cmd_flags = 0;
8775         }
8776
8777         if (rc == 0) {
8778                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8779                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8780                 memcpy(ret_buf, &val, 4);
8781         }
8782
8783         /* disable access to nvram interface */
8784         bnx2x_disable_nvram_access(bp);
8785         bnx2x_release_nvram_lock(bp);
8786
8787         return rc;
8788 }
8789
8790 static int bnx2x_get_eeprom(struct net_device *dev,
8791                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8792 {
8793         struct bnx2x *bp = netdev_priv(dev);
8794         int rc;
8795
8796         if (!netif_running(dev))
8797                 return -EAGAIN;
8798
8799         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8800            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8801            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8802            eeprom->len, eeprom->len);
8803
8804         /* parameters already validated in ethtool_get_eeprom */
8805
8806         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8807
8808         return rc;
8809 }
8810
/* Write one dword @val (cpu order) to NVRAM at @offset through the MCP
 * NVM interface.  @cmd_flags carries the MCPR_NVM_COMMAND_FIRST/LAST
 * sequencing bits.  Returns 0 on success, -EBUSY on timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion; note the 'val' parameter is reused below
	 * as a scratch variable for the command-status readback */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
8850
8851 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8852
/* Write a single byte (*data_buf) to NVRAM at @offset via a
 * read-modify-write of the containing aligned dword.  Used for
 * ethtool single-byte EEPROM writes.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the aligned dword, patch one byte, write it back; a
	 * single dword transfer is both FIRST and LAST */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		/* NOTE(review): the mask/insert below operates on the
		 * big-endian value *before* the be32_to_cpu conversion;
		 * this pairs the shifted byte with the ethtool byte
		 * offset only on little-endian hosts - verify behavior
		 * on big-endian before relying on it there */
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8898
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * A single-byte request (buf_size == 1, as issued by ethtool) is routed
 * to bnx2x_nvram_write1(); otherwise offset and size must be dword
 * aligned and within the flash.  Returns 0 or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* mark the final dword LAST, and restart the FIRST/LAST
		 * command sequence at NVRAM page boundaries */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
8959
/* ethtool_ops .set_eeprom handler.  Besides plain NVRAM writes, three
 * magic values drive the external PHY firmware-upgrade sequence:
 * prepare (reset link, raise GPIO), re-init (bring link back up), and
 * upgrade-complete (reset the SFX7101 PHY so the new FW runs).
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		/* SFX7101 needs GPIO0 high to enter FW download mode */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		/* NOTE(review): 'PHYC' would encode as 0x50485943, not
		 * 0x53985943 - the value and mnemonic disagree; confirm
		 * which one the upgrade tool actually sends */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9034
9035 static int bnx2x_get_coalesce(struct net_device *dev,
9036                               struct ethtool_coalesce *coal)
9037 {
9038         struct bnx2x *bp = netdev_priv(dev);
9039
9040         memset(coal, 0, sizeof(struct ethtool_coalesce));
9041
9042         coal->rx_coalesce_usecs = bp->rx_ticks;
9043         coal->tx_coalesce_usecs = bp->tx_ticks;
9044
9045         return 0;
9046 }
9047
9048 static int bnx2x_set_coalesce(struct net_device *dev,
9049                               struct ethtool_coalesce *coal)
9050 {
9051         struct bnx2x *bp = netdev_priv(dev);
9052
9053         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
9054         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9055                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9056
9057         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
9058         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9059                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9060
9061         if (netif_running(dev))
9062                 bnx2x_update_coalesce(bp);
9063
9064         return 0;
9065 }
9066
9067 static void bnx2x_get_ringparam(struct net_device *dev,
9068                                 struct ethtool_ringparam *ering)
9069 {
9070         struct bnx2x *bp = netdev_priv(dev);
9071
9072         ering->rx_max_pending = MAX_RX_AVAIL;
9073         ering->rx_mini_max_pending = 0;
9074         ering->rx_jumbo_max_pending = 0;
9075
9076         ering->rx_pending = bp->rx_ring_size;
9077         ering->rx_mini_pending = 0;
9078         ering->rx_jumbo_pending = 0;
9079
9080         ering->tx_max_pending = MAX_TX_AVAIL;
9081         ering->tx_pending = bp->tx_ring_size;
9082 }
9083
9084 static int bnx2x_set_ringparam(struct net_device *dev,
9085                                struct ethtool_ringparam *ering)
9086 {
9087         struct bnx2x *bp = netdev_priv(dev);
9088         int rc = 0;
9089
9090         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9091                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9092                 return -EAGAIN;
9093         }
9094
9095         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9096             (ering->tx_pending > MAX_TX_AVAIL) ||
9097             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9098                 return -EINVAL;
9099
9100         bp->rx_ring_size = ering->rx_pending;
9101         bp->tx_ring_size = ering->tx_pending;
9102
9103         if (netif_running(dev)) {
9104                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9105                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9106         }
9107
9108         return rc;
9109 }
9110
9111 static void bnx2x_get_pauseparam(struct net_device *dev,
9112                                  struct ethtool_pauseparam *epause)
9113 {
9114         struct bnx2x *bp = netdev_priv(dev);
9115
9116         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9117                            BNX2X_FLOW_CTRL_AUTO) &&
9118                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9119
9120         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9121                             BNX2X_FLOW_CTRL_RX);
9122         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9123                             BNX2X_FLOW_CTRL_TX);
9124
9125         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9126            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9127            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9128 }
9129
9130 static int bnx2x_set_pauseparam(struct net_device *dev,
9131                                 struct ethtool_pauseparam *epause)
9132 {
9133         struct bnx2x *bp = netdev_priv(dev);
9134
9135         if (IS_E1HMF(bp))
9136                 return 0;
9137
9138         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9139            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9140            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9141
9142         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9143
9144         if (epause->rx_pause)
9145                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9146
9147         if (epause->tx_pause)
9148                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9149
9150         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9151                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9152
9153         if (epause->autoneg) {
9154                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9155                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9156                         return -EINVAL;
9157                 }
9158
9159                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9160                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9161         }
9162
9163         DP(NETIF_MSG_LINK,
9164            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9165
9166         if (netif_running(dev)) {
9167                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9168                 bnx2x_link_set(bp);
9169         }
9170
9171         return 0;
9172 }
9173
9174 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9175 {
9176         struct bnx2x *bp = netdev_priv(dev);
9177         int changed = 0;
9178         int rc = 0;
9179
9180         if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
9181                 return -EINVAL;
9182
9183         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9184                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9185                 return -EAGAIN;
9186         }
9187
9188         /* TPA requires Rx CSUM offloading */
9189         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9190                 if (!bp->disable_tpa) {
9191                         if (!(dev->features & NETIF_F_LRO)) {
9192                                 dev->features |= NETIF_F_LRO;
9193                                 bp->flags |= TPA_ENABLE_FLAG;
9194                                 changed = 1;
9195                         }
9196                 } else
9197                         rc = -EINVAL;
9198         } else if (dev->features & NETIF_F_LRO) {
9199                 dev->features &= ~NETIF_F_LRO;
9200                 bp->flags &= ~TPA_ENABLE_FLAG;
9201                 changed = 1;
9202         }
9203
9204         if (data & ETH_FLAG_RXHASH)
9205                 dev->features |= NETIF_F_RXHASH;
9206         else
9207                 dev->features &= ~NETIF_F_RXHASH;
9208
9209         if (changed && netif_running(dev)) {
9210                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9211                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9212         }
9213
9214         return rc;
9215 }
9216
9217 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9218 {
9219         struct bnx2x *bp = netdev_priv(dev);
9220
9221         return bp->rx_csum;
9222 }
9223
9224 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9225 {
9226         struct bnx2x *bp = netdev_priv(dev);
9227         int rc = 0;
9228
9229         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
9230                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
9231                 return -EAGAIN;
9232         }
9233
9234         bp->rx_csum = data;
9235
9236         /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9237            TPA'ed packets will be discarded due to wrong TCP CSUM */
9238         if (!data) {
9239                 u32 flags = ethtool_op_get_flags(dev);
9240
9241                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9242         }
9243
9244         return rc;
9245 }
9246
9247 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9248 {
9249         if (data) {
9250                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9251                 dev->features |= NETIF_F_TSO6;
9252         } else {
9253                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9254                 dev->features &= ~NETIF_F_TSO6;
9255         }
9256
9257         return 0;
9258 }
9259
/* Names reported to ethtool for the self-test results, in execution
 * order; each index corresponds to the buf[] slot filled by
 * bnx2x_self_test().
 */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
9271
/* Offline register self-test: for each table entry, write 0x00000000
 * and then 0xffffffff (masked), read back, and verify that the masked
 * value sticks.  The original register contents are restored after
 * each probe.
 *
 * Returns 0 on success, -ENODEV on a read-back mismatch or when the
 * interface is down.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* port-0 register address */
		u32 offset1;	/* per-port stride: addr = offset0 + port*offset1 */
		u32 mask;	/* bits implemented as read/write */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }	/* sentinel */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			/* keep the original value to restore it afterwards */
			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
9368
/* Offline memory self-test: read every word of a set of device
 * memories (reads alone exercise them), then check the parity status
 * registers, ignoring bits masked as expected per chip revision
 * (E1 vs E1H).
 *
 * Returns 0 on success, -ENODEV on an unexpected parity bit or when
 * the interface is down.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;	/* first word of the memory region */
		int size;	/* region length in 32-bit words */
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }	/* sentinel */
	};
	static const struct {
		char *name;	/* label for the debug print */
		u32 offset;	/* parity status register */
		u32 e1_mask;	/* bits to ignore on E1 chips */
		u32 e1h_mask;	/* bits to ignore on E1H chips */
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }	/* sentinel */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
9427
9428 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9429 {
9430         int cnt = 1000;
9431
9432         if (link_up)
9433                 while (bnx2x_link_test(bp) && cnt--)
9434                         msleep(10);
9435 }
9436
/* Send a single self-addressed frame through the selected loopback
 * path on queue 0 and verify it arrives back intact.
 *
 * @loopback_mode: BNX2X_PHY_LOOPBACK requires the link to already be
 *	in LOOPBACK_XGXS_10 mode; BNX2X_MAC_LOOPBACK reconfigures the
 *	link for BMAC loopback here.
 * @link_up: unused in the body; kept for interface symmetry with the
 *	caller (bnx2x_test_loopback) - NOTE(review): confirm.
 *
 * Returns 0 on success, -EINVAL for a bad/unprepared mode, -ENOMEM on
 * skb allocation failure, -ENODEV when the frame is not seen back or
 * is corrupted.
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dest MAC = own address, source
	 * zeroed, rest of the header 0x77, payload a recognizable
	 * (i & 0xff) byte pattern that is checked on receive */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet: record the TX/RX consumer indices
	 * first so we can detect that exactly num_pkts completed */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* build the start BD: single unicast frame, 2 BDs total */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* ensure the BDs are visible to HW before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	/* allow the HW time to loop the frame back */
	udelay(100);

	/* the frame must have completed on TX ... */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* ... and arrived on RX */
	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* verify the completion: fast-path, no errors, right length */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload pattern survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* consume the RX BD and completion we just inspected */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
9571
9572 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9573 {
9574         int rc = 0, res;
9575
9576         if (BP_NOMCP(bp))
9577                 return rc;
9578
9579         if (!netif_running(bp->dev))
9580                 return BNX2X_LOOPBACK_FAILED;
9581
9582         bnx2x_netif_stop(bp, 1);
9583         bnx2x_acquire_phy_lock(bp);
9584
9585         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9586         if (res) {
9587                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9588                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9589         }
9590
9591         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9592         if (res) {
9593                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9594                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9595         }
9596
9597         bnx2x_release_phy_lock(bp);
9598         bnx2x_netif_start(bp);
9599
9600         return rc;
9601 }
9602
9603 #define CRC32_RESIDUAL                  0xdebb20e3
9604
/* Online NVRAM self-test: verify the signature word and then the CRC32
 * of each known NVRAM area.  A valid area, CRC'ed over data plus its
 * stored checksum, yields the standard CRC32 residual.
 *
 * Returns 0 on success (or without management firmware), a negative
 * read error, or -ENODEV on a bad magic/CRC.
 */
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;	/* byte offset within NVRAM */
		int size;	/* area length in bytes; 0 terminates */
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	/* sized for the largest area (manuf_info, 0x350 bytes) */
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	/* nothing to validate without management firmware */
	if (BP_NOMCP(bp))
		return 0;

	/* check the NVRAM signature word first */
	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	/* read each area and verify its little-endian CRC32 residual */
	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
9664
/* Online interrupt self-test: post a SET_MAC ramrod on the slowpath
 * queue and wait (10 x 10ms) for its completion to clear
 * set_mac_pending, which proves slowpath interrupt delivery works.
 *
 * Returns 0 on success, -ENODEV on timeout or when the NIC is down,
 * or the bnx2x_sp_post() error.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* zero-length command - only the completion interrupt matters */
	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* mark pending before posting so the completion path can clear it */
	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		/* poll for the completion handler to clear the flag */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
9700
/* ethtool self-test entry point.
 *
 * Runs the offline tests (registers, memory, loopback - these take the
 * NIC down and reload it in diagnostic mode) when requested and
 * permitted, followed by the online tests (nvram, interrupt, link).
 * Results land in buf[] in the order of bnx2x_tests_str_arr; a nonzero
 * slot marks a failure and ETH_TEST_FL_FAILED is set in etest->flags.
 * Refused entirely during parity-error recovery or when the NIC is
 * down.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* remember the link state, then reload in diagnostic mode */
		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		/* back to normal operating mode */
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	/* online tests */
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* link test only on the port management function */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
9776
/* Per-queue ethtool statistics descriptors: offset of the counter
 * within the per-queue stats structure (via Q_STATS_OFFSET32), its
 * width in bytes (8 for the *_hi/_lo 64-bit counters, 4 for plain
 * 32-bit ones), and the name format ("%d" is the queue index).
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%d]: tx_bcast_packets" }
};
9806
9807 static const struct {
9808         long offset;
9809         int size;
9810         u32 flags;
9811 #define STATS_FLAGS_PORT                1
9812 #define STATS_FLAGS_FUNC                2
9813 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9814         u8 string[ETH_GSTRING_LEN];
9815 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9816 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9817                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9818         { STATS_OFFSET32(error_bytes_received_hi),
9819                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9820         { STATS_OFFSET32(total_unicast_packets_received_hi),
9821                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9822         { STATS_OFFSET32(total_multicast_packets_received_hi),
9823                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9824         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9825                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9826         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9827                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9828         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9829                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9830         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9831                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9832         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9833                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9834 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9835                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9836         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9837                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9838         { STATS_OFFSET32(no_buff_discard_hi),
9839                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9840         { STATS_OFFSET32(mac_filter_discard),
9841                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9842         { STATS_OFFSET32(xxoverflow_discard),
9843                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9844         { STATS_OFFSET32(brb_drop_hi),
9845                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9846         { STATS_OFFSET32(brb_truncate_hi),
9847                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9848         { STATS_OFFSET32(pause_frames_received_hi),
9849                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9850         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9851                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9852         { STATS_OFFSET32(nig_timer_max),
9853                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9854 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9855                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9856         { STATS_OFFSET32(rx_skb_alloc_failed),
9857                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9858         { STATS_OFFSET32(hw_csum_err),
9859                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9860
9861         { STATS_OFFSET32(total_bytes_transmitted_hi),
9862                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9863         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9864                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9865         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9866                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
9867         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
9868                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
9869         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
9870                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
9871         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9872                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9873         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9874                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9875 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9876                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9877         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9878                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9879         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9880                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9881         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9882                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9883         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9884                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9885         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9886                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9887         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9888                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9889         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9890                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9891         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9892                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9893         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9894                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9895 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9896                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9897         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9898                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9899         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9900                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9901         { STATS_OFFSET32(pause_frames_sent_hi),
9902                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9903 };
9904
/* Stat-selection helpers for the bnx2x_stats_arr table:
 * - IS_PORT_STAT(): true only for port-only stats (an entry flagged for
 *   both port and function does NOT count as port-only)
 * - IS_FUNC_STAT(): true when the entry carries the function flag
 * - IS_E1HMF_MODE_STAT(): in E1H multi-function mode port stats are
 *   suppressed unless BNX2X_MSG_STATS was enabled via msglevel
 */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
9910
9911 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
9912 {
9913         struct bnx2x *bp = netdev_priv(dev);
9914         int i, num_stats;
9915
9916         switch (stringset) {
9917         case ETH_SS_STATS:
9918                 if (is_multi(bp)) {
9919                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
9920                         if (!IS_E1HMF_MODE_STAT(bp))
9921                                 num_stats += BNX2X_NUM_STATS;
9922                 } else {
9923                         if (IS_E1HMF_MODE_STAT(bp)) {
9924                                 num_stats = 0;
9925                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
9926                                         if (IS_FUNC_STAT(i))
9927                                                 num_stats++;
9928                         } else
9929                                 num_stats = BNX2X_NUM_STATS;
9930                 }
9931                 return num_stats;
9932
9933         case ETH_SS_TEST:
9934                 return BNX2X_NUM_TESTS;
9935
9936         default:
9937                 return -EINVAL;
9938         }
9939 }
9940
9941 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9942 {
9943         struct bnx2x *bp = netdev_priv(dev);
9944         int i, j, k;
9945
9946         switch (stringset) {
9947         case ETH_SS_STATS:
9948                 if (is_multi(bp)) {
9949                         k = 0;
9950                         for_each_queue(bp, i) {
9951                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9952                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9953                                                 bnx2x_q_stats_arr[j].string, i);
9954                                 k += BNX2X_NUM_Q_STATS;
9955                         }
9956                         if (IS_E1HMF_MODE_STAT(bp))
9957                                 break;
9958                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9959                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9960                                        bnx2x_stats_arr[j].string);
9961                 } else {
9962                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9963                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9964                                         continue;
9965                                 strcpy(buf + j*ETH_GSTRING_LEN,
9966                                        bnx2x_stats_arr[i].string);
9967                                 j++;
9968                         }
9969                 }
9970                 break;
9971
9972         case ETH_SS_TEST:
9973                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9974                 break;
9975         }
9976 }
9977
9978 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9979                                     struct ethtool_stats *stats, u64 *buf)
9980 {
9981         struct bnx2x *bp = netdev_priv(dev);
9982         u32 *hw_stats, *offset;
9983         int i, j, k;
9984
9985         if (is_multi(bp)) {
9986                 k = 0;
9987                 for_each_queue(bp, i) {
9988                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9989                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9990                                 if (bnx2x_q_stats_arr[j].size == 0) {
9991                                         /* skip this counter */
9992                                         buf[k + j] = 0;
9993                                         continue;
9994                                 }
9995                                 offset = (hw_stats +
9996                                           bnx2x_q_stats_arr[j].offset);
9997                                 if (bnx2x_q_stats_arr[j].size == 4) {
9998                                         /* 4-byte counter */
9999                                         buf[k + j] = (u64) *offset;
10000                                         continue;
10001                                 }
10002                                 /* 8-byte counter */
10003                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10004                         }
10005                         k += BNX2X_NUM_Q_STATS;
10006                 }
10007                 if (IS_E1HMF_MODE_STAT(bp))
10008                         return;
10009                 hw_stats = (u32 *)&bp->eth_stats;
10010                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10011                         if (bnx2x_stats_arr[j].size == 0) {
10012                                 /* skip this counter */
10013                                 buf[k + j] = 0;
10014                                 continue;
10015                         }
10016                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10017                         if (bnx2x_stats_arr[j].size == 4) {
10018                                 /* 4-byte counter */
10019                                 buf[k + j] = (u64) *offset;
10020                                 continue;
10021                         }
10022                         /* 8-byte counter */
10023                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10024                 }
10025         } else {
10026                 hw_stats = (u32 *)&bp->eth_stats;
10027                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10028                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10029                                 continue;
10030                         if (bnx2x_stats_arr[i].size == 0) {
10031                                 /* skip this counter */
10032                                 buf[j] = 0;
10033                                 j++;
10034                                 continue;
10035                         }
10036                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10037                         if (bnx2x_stats_arr[i].size == 4) {
10038                                 /* 4-byte counter */
10039                                 buf[j] = (u64) *offset;
10040                                 j++;
10041                                 continue;
10042                         }
10043                         /* 8-byte counter */
10044                         buf[j] = HILO_U64(*offset, *(offset + 1));
10045                         j++;
10046                 }
10047         }
10048 }
10049
10050 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10051 {
10052         struct bnx2x *bp = netdev_priv(dev);
10053         int i;
10054
10055         if (!netif_running(dev))
10056                 return 0;
10057
10058         if (!bp->port.pmf)
10059                 return 0;
10060
10061         if (data == 0)
10062                 data = 2;
10063
10064         for (i = 0; i < (data * 2); i++) {
10065                 if ((i % 2) == 0)
10066                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10067                                       SPEED_1000);
10068                 else
10069                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10070
10071                 msleep_interruptible(500);
10072                 if (signal_pending(current))
10073                         break;
10074         }
10075
10076         if (bp->link_vars.link_up)
10077                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10078                               bp->link_vars.line_speed);
10079
10080         return 0;
10081 }
10082
/* ethtool callbacks for the bnx2x driver; installed on the netdev in
 * bnx2x_init_dev()
 */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};
10120
10121 /* end of ethtool_ops */
10122
10123
10124 /* called with rtnl_lock */
10125 static int bnx2x_open(struct net_device *dev)
10126 {
10127         struct bnx2x *bp = netdev_priv(dev);
10128
10129         netif_carrier_off(dev);
10130
10131         bnx2x_set_power_state(bp, PCI_D0);
10132
10133         if (!bnx2x_reset_is_done(bp)) {
10134                 do {
10135                         /* Reset MCP mail box sequence if there is on going
10136                          * recovery
10137                          */
10138                         bp->fw_seq = 0;
10139
10140                         /* If it's the first function to load and reset done
10141                          * is still not cleared it may mean that. We don't
10142                          * check the attention state here because it may have
10143                          * already been cleared by a "common" reset but we
10144                          * shell proceed with "process kill" anyway.
10145                          */
10146                         if ((bnx2x_get_load_cnt(bp) == 0) &&
10147                                 bnx2x_trylock_hw_lock(bp,
10148                                 HW_LOCK_RESOURCE_RESERVED_08) &&
10149                                 (!bnx2x_leader_reset(bp))) {
10150                                 DP(NETIF_MSG_HW, "Recovered in open\n");
10151                                 break;
10152                         }
10153
10154                         bnx2x_set_power_state(bp, PCI_D3hot);
10155
10156                         printk(KERN_ERR"%s: Recovery flow hasn't been properly"
10157                         " completed yet. Try again later. If u still see this"
10158                         " message after a few retries then power cycle is"
10159                         " required.\n", bp->dev->name);
10160
10161                         return -EAGAIN;
10162                 } while (0);
10163         }
10164
10165         bp->recovery_state = BNX2X_RECOVERY_DONE;
10166
10167         return bnx2x_nic_load(bp, LOAD_OPEN);
10168 }
10169
/* called with rtnl_lock */
/* ndo_stop: unload the NIC (releasing IRQs) and drop the device into
 * the D3hot low-power state.
 */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
10181
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode derived from dev->flags and the device's
 * multicast list.  On E1 chips multicast MACs are written into the CAM
 * via a SET_MAC slow-path ramrod; on E1H chips they are hashed into the
 * MC_HASH registers.  The resulting mode is finally pushed to the storms.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* nothing to program unless the device is fully up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 can only CAM-filter up to BNX2X_MAX_MULTICAST addresses */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* build one CAM entry (three byte-swapped 16-bit
			 * MAC words) per multicast address
			 */
			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			/* invalidate entries left over from a previously
			 * longer list
			 */
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* publish the pending-MAC count before posting the
			 * slow-path ramrod
			 */
			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* map each address (via the top CRC32c byte) onto
			 * one bit of the MC_HASH filter registers
			 */
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
10302
10303
10304 /* called with rtnl_lock */
10305 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
10306                            int devad, u16 addr)
10307 {
10308         struct bnx2x *bp = netdev_priv(netdev);
10309         u16 value;
10310         int rc;
10311         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10312
10313         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
10314            prtad, devad, addr);
10315
10316         if (prtad != bp->mdio.prtad) {
10317                 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
10318                    prtad, bp->mdio.prtad);
10319                 return -EINVAL;
10320         }
10321
10322         /* The HW expects different devad if CL22 is used */
10323         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
10324
10325         bnx2x_acquire_phy_lock(bp);
10326         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
10327                              devad, addr, &value);
10328         bnx2x_release_phy_lock(bp);
10329         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
10330
10331         if (!rc)
10332                 rc = value;
10333         return rc;
10334 }
10335
10336 /* called with rtnl_lock */
10337 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
10338                             u16 addr, u16 value)
10339 {
10340         struct bnx2x *bp = netdev_priv(netdev);
10341         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10342         int rc;
10343
10344         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
10345                            " value 0x%x\n", prtad, devad, addr, value);
10346
10347         if (prtad != bp->mdio.prtad) {
10348                 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
10349                    prtad, bp->mdio.prtad);
10350                 return -EINVAL;
10351         }
10352
10353         /* The HW expects different devad if CL22 is used */
10354         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
10355
10356         bnx2x_acquire_phy_lock(bp);
10357         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
10358                               devad, addr, value);
10359         bnx2x_release_phy_lock(bp);
10360         return rc;
10361 }
10362
10363 /* called with rtnl_lock */
10364 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10365 {
10366         struct bnx2x *bp = netdev_priv(dev);
10367         struct mii_ioctl_data *mdio = if_mii(ifr);
10368
10369         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
10370            mdio->phy_id, mdio->reg_num, mdio->val_in);
10371
10372         if (!netif_running(dev))
10373                 return -EAGAIN;
10374
10375         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
10376 }
10377
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the device interrupt handler by hand with the
 * hardware IRQ masked, so netpoll clients (e.g. netconsole) can make
 * progress in contexts where interrupts cannot be serviced normally.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
10388
/* netdev callbacks for the bnx2x driver; installed on the netdev in
 * bnx2x_init_dev()
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
10406
/* One-time PCI/netdev setup for a newly probed device: enable the PCI
 * function, verify and map BAR0 (registers) and BAR2 (doorbells),
 * configure DMA masks, clear stale indirect-address state and wire up
 * netdev ops/features.  On failure, everything acquired so far is
 * released via the goto cleanup chain.  Returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* both BAR0 and BAR2 must be memory-mapped resources */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first caller to enable the device claims the regions
	 * and saves the PCI state
	 */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA (DAC); fall back to 32-bit, else fail */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* BAR0: device register space */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* BAR2: doorbell space, mapping capped at BNX2X_DB_SIZE */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
10576
10577 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
10578                                                  int *width, int *speed)
10579 {
10580         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10581
10582         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10583
10584         /* return value of 1=2.5GHz 2=5GHz */
10585         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10586 }
10587
10588 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
10589 {
10590         const struct firmware *firmware = bp->firmware;
10591         struct bnx2x_fw_file_hdr *fw_hdr;
10592         struct bnx2x_fw_file_section *sections;
10593         u32 offset, len, num_ops;
10594         u16 *ops_offsets;
10595         int i;
10596         const u8 *fw_ver;
10597
10598         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
10599                 return -EINVAL;
10600
10601         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
10602         sections = (struct bnx2x_fw_file_section *)fw_hdr;
10603
10604         /* Make sure none of the offsets and sizes make us read beyond
10605          * the end of the firmware data */
10606         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
10607                 offset = be32_to_cpu(sections[i].offset);
10608                 len = be32_to_cpu(sections[i].len);
10609                 if (offset + len > firmware->size) {
10610                         dev_err(&bp->pdev->dev,
10611                                 "Section %d length is out of bounds\n", i);
10612                         return -EINVAL;
10613                 }
10614         }
10615
10616         /* Likewise for the init_ops offsets */
10617         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
10618         ops_offsets = (u16 *)(firmware->data + offset);
10619         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
10620
10621         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
10622                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
10623                         dev_err(&bp->pdev->dev,
10624                                 "Section offset %d is out of bounds\n", i);
10625                         return -EINVAL;
10626                 }
10627         }
10628
10629         /* Check FW version */
10630         offset = be32_to_cpu(fw_hdr->fw_version.offset);
10631         fw_ver = firmware->data + offset;
10632         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
10633             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
10634             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
10635             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
10636                 dev_err(&bp->pdev->dev,
10637                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
10638                        fw_ver[0], fw_ver[1], fw_ver[2],
10639                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
10640                        BCM_5710_FW_MINOR_VERSION,
10641                        BCM_5710_FW_REVISION_VERSION,
10642                        BCM_5710_FW_ENGINEERING_VERSION);
10643                 return -EINVAL;
10644         }
10645
10646         return 0;
10647 }
10648
10649 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10650 {
10651         const __be32 *source = (const __be32 *)_source;
10652         u32 *target = (u32 *)_target;
10653         u32 i;
10654
10655         for (i = 0; i < n/4; i++)
10656                 target[i] = be32_to_cpu(source[i]);
10657 }
10658
10659 /*
10660    Ops array is stored in the following format:
10661    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
10662  */
10663 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
10664 {
10665         const __be32 *source = (const __be32 *)_source;
10666         struct raw_op *target = (struct raw_op *)_target;
10667         u32 i, j, tmp;
10668
10669         for (i = 0, j = 0; i < n/8; i++, j += 2) {
10670                 tmp = be32_to_cpu(source[j]);
10671                 target[i].op = (tmp >> 24) & 0xff;
10672                 target[i].offset = tmp & 0xffffff;
10673                 target[i].raw_data = be32_to_cpu(source[j + 1]);
10674         }
10675 }
10676
10677 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
10678 {
10679         const __be16 *source = (const __be16 *)_source;
10680         u16 *target = (u16 *)_target;
10681         u32 i;
10682
10683         for (i = 0; i < n/2; i++)
10684                 target[i] = be16_to_cpu(source[i]);
10685 }
10686
/* Allocate bp->arr (size taken from the firmware header) and fill it by
 * running 'func' over the matching section of the firmware blob.  On
 * allocation failure, jumps to 'lbl' in the enclosing function.  Relies
 * on 'bp' and 'fw_hdr' being in scope at the expansion site. */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
do {                                                                    \
        u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
        bp->arr = kmalloc(len, GFP_KERNEL);                             \
        if (!bp->arr) {                                                 \
                pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
                goto lbl;                                               \
        }                                                               \
        func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
             (u8 *)bp->arr, len);                                       \
} while (0)
10698
10699 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
10700 {
10701         const char *fw_file_name;
10702         struct bnx2x_fw_file_hdr *fw_hdr;
10703         int rc;
10704
10705         if (CHIP_IS_E1(bp))
10706                 fw_file_name = FW_FILE_NAME_E1;
10707         else if (CHIP_IS_E1H(bp))
10708                 fw_file_name = FW_FILE_NAME_E1H;
10709         else {
10710                 dev_err(dev, "Unsupported chip revision\n");
10711                 return -EINVAL;
10712         }
10713
10714         dev_info(dev, "Loading %s\n", fw_file_name);
10715
10716         rc = request_firmware(&bp->firmware, fw_file_name, dev);
10717         if (rc) {
10718                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
10719                 goto request_firmware_exit;
10720         }
10721
10722         rc = bnx2x_check_firmware(bp);
10723         if (rc) {
10724                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
10725                 goto request_firmware_exit;
10726         }
10727
10728         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
10729
10730         /* Initialize the pointers to the init arrays */
10731         /* Blob */
10732         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
10733
10734         /* Opcodes */
10735         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
10736
10737         /* Offsets */
10738         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
10739                             be16_to_cpu_n);
10740
10741         /* STORMs firmware */
10742         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10743                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
10744         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
10745                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
10746         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10747                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
10748         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
10749                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
10750         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10751                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
10752         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
10753                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
10754         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
10755                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
10756         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
10757                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
10758
10759         return 0;
10760
10761 init_offsets_alloc_err:
10762         kfree(bp->init_ops);
10763 init_ops_alloc_err:
10764         kfree(bp->init_data);
10765 request_firmware_exit:
10766         release_firmware(bp->firmware);
10767
10768         return rc;
10769 }
10770
10771
/**
 * bnx2x_init_one - PCI probe callback
 * @pdev: PCI device being probed
 * @ent: matching entry from the driver's PCI id table
 *
 * Allocates the net_device, maps the device (bnx2x_init_dev), reads
 * chip/board configuration (bnx2x_init_bp), loads and parses the init
 * firmware, and finally registers the netdev.  Any failure after
 * bnx2x_init_dev() unwinds mappings and PCI state via init_one_exit.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                dev_err(&pdev->dev, "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msg_enable = debug;

        pci_set_drvdata(pdev, dev);

        /* Map BARs, set up DMA masks and netdev ops */
        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                dev_err(&pdev->dev, "Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        pr_cont("node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        /* Undo bnx2x_init_dev() (and anything after it) in reverse */
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        /* Release regions only for the last enable of this device */
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}
10842
/**
 * bnx2x_remove_one - PCI remove callback
 * @pdev: PCI device being removed
 *
 * Unregisters the netdev, stops the deferred reset task, frees the
 * parsed firmware arrays, unmaps BARs and releases all PCI resources,
 * mirroring the teardown order of the bnx2x_init_one() error path.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        /* Make sure RESET task is not scheduled before continuing */
        cancel_delayed_work_sync(&bp->reset_task);

        /* Free the init arrays built by bnx2x_init_firmware() */
        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        /* Release regions only for the last enable of this device */
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
10878
/*
 * Minimal NIC teardown used on a PCI channel error: stop traffic and
 * timers, release IRQs, invalidate the E1 multicast CAM and free all
 * driver memory.  Unlike a normal unload, no MCP/ramrod commands are
 * issued -- the device is assumed unreachable.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);
        netif_carrier_off(bp->dev);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                /* Invalidate every configured multicast CAM entry */
                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        return 0;
}
10917
/*
 * Re-discover the shared-memory (MCP) state after a slot reset.  If the
 * shmem base read from the chip is outside the expected window, the MCP
 * is considered inactive and NO_MCP_FLAG is set; otherwise the firmware
 * mailbox sequence number is re-synchronized.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        /* NOTE(review): 0xA0000-0xBFFFF appears to be the valid shmem
         * window for this chip family -- confirm against HW docs */
        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}
10947
10948 /**
10949  * bnx2x_io_error_detected - called when PCI error is detected
10950  * @pdev: Pointer to PCI device
10951  * @state: The current pci connection state
10952  *
10953  * This function is called after a PCI bus error affecting
10954  * this device has been detected.
10955  */
10956 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10957                                                 pci_channel_state_t state)
10958 {
10959         struct net_device *dev = pci_get_drvdata(pdev);
10960         struct bnx2x *bp = netdev_priv(dev);
10961
10962         rtnl_lock();
10963
10964         netif_device_detach(dev);
10965
10966         if (state == pci_channel_io_perm_failure) {
10967                 rtnl_unlock();
10968                 return PCI_ERS_RESULT_DISCONNECT;
10969         }
10970
10971         if (netif_running(dev))
10972                 bnx2x_eeh_nic_unload(bp);
10973
10974         pci_disable_device(pdev);
10975
10976         rtnl_unlock();
10977
10978         /* Request a slot reset */
10979         return PCI_ERS_RESULT_NEED_RESET;
10980 }
10981
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Re-enables
 * the device and restores config space under rtnl_lock; the actual
 * NIC re-load happens later in bnx2x_io_resume().
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        /* Power the chip back up only if the interface was running */
        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}
11012
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-reads the MCP state and, if
 * the interface was up, reloads the NIC before re-attaching the netdev.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        /* A parity-error recovery is still in progress; don't touch
         * the device until it finishes */
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                printk(KERN_ERR "Handling parity error recovery. Try again later\n");
                return;
        }

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}
11041
/* PCI error-recovery (EEH/AER) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};
11047
/* PCI driver glue: probe/remove, power management and error recovery */
static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};
11057
11058 static int __init bnx2x_init(void)
11059 {
11060         int ret;
11061
11062         pr_info("%s", version);
11063
11064         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11065         if (bnx2x_wq == NULL) {
11066                 pr_err("Cannot create workqueue\n");
11067                 return -ENOMEM;
11068         }
11069
11070         ret = pci_register_driver(&bnx2x_pci_driver);
11071         if (ret) {
11072                 pr_err("Cannot register driver\n");
11073                 destroy_workqueue(bnx2x_wq);
11074         }
11075         return ret;
11076 }
11077
/* Module exit point: unregister the PCI driver first so no new work can
 * be queued, then destroy the slow-path workqueue. */
static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
11087
11088 #ifdef BCM_CNIC
11089
/* count denotes the number of new completions we have seen */
/*
 * Credit 'count' slow-path completions back to the CNIC budget and
 * drain as many queued CNIC kwqes onto the SPQ as the budget allows.
 * Runs under spq_lock; the SPQ producer is pushed to HW at the end.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                /* Nothing left on the CNIC kwq ring */
                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                /* Advance the consumer with wrap-around */
                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}
11125
/*
 * cnic_eth_dev callback: enqueue up to 'count' 16-byte kwqes from the
 * CNIC driver onto the internal kwq ring, then kick the SPQ if budget
 * is available.  Returns the number of kwqes actually accepted
 * (may be less than 'count' if the ring fills up).
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                /* Ring full -- accept only what fits */
                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                /* Advance the producer with wrap-around */
                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        /* Try to drain the new entries onto the SPQ right away */
        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}
11168
11169 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
11170 {
11171         struct cnic_ops *c_ops;
11172         int rc = 0;
11173
11174         mutex_lock(&bp->cnic_mutex);
11175         c_ops = bp->cnic_ops;
11176         if (c_ops)
11177                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
11178         mutex_unlock(&bp->cnic_mutex);
11179
11180         return rc;
11181 }
11182
11183 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
11184 {
11185         struct cnic_ops *c_ops;
11186         int rc = 0;
11187
11188         rcu_read_lock();
11189         c_ops = rcu_dereference(bp->cnic_ops);
11190         if (c_ops)
11191                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
11192         rcu_read_unlock();
11193
11194         return rc;
11195 }
11196
11197 /*
11198  * for commands that have no data
11199  */
11200 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
11201 {
11202         struct cnic_ctl_info ctl = {0};
11203
11204         ctl.cmd = cmd;
11205
11206         return bnx2x_cnic_ctl_send(bp, &ctl);
11207 }
11208
11209 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
11210 {
11211         struct cnic_ctl_info ctl;
11212
11213         /* first we tell CNIC and only then we count this as a completion */
11214         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
11215         ctl.data.comp.cid = cid;
11216
11217         bnx2x_cnic_ctl_send_bh(bp, &ctl);
11218         bnx2x_cnic_sp_post(bp, 1);
11219 }
11220
/*
 * cnic_eth_dev callback: service control commands from the CNIC driver
 * (ILT context-table writes, completion credits, L2 client start/stop).
 * Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                /* Include this client in the storm RX-mode filters */
                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                /* Remove this client from the storm RX-mode filters */
                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}
11267
11268 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
11269 {
11270         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
11271
11272         if (bp->flags & USING_MSIX_FLAG) {
11273                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
11274                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
11275                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
11276         } else {
11277                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
11278                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
11279         }
11280         cp->irq_arr[0].status_blk = bp->cnic_sb;
11281         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
11282         cp->irq_arr[1].status_blk = bp->def_status_blk;
11283         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
11284
11285         cp->num_irq = 2;
11286 }
11287
/*
 * cnic_eth_dev callback: register the CNIC driver with bnx2x.
 * Allocates the kwq ring, initializes the CNIC status block and irq
 * info, programs the iSCSI MAC and finally publishes 'ops' via RCU.
 * Returns 0 on success, or -EINVAL/-EBUSY/-ENOMEM on failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        /* Interrupts are currently disabled (e.g. during a reset) */
        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        /* Empty ring: producer == consumer */
        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        /* Publish last so callbacks only ever see fully set-up state */
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}
11325
/*
 * cnic_eth_dev callback: unregister the CNIC driver.  Clears the iSCSI
 * MAC, unpublishes the ops pointer and waits for in-flight RCU readers
 * (bnx2x_cnic_ctl_send_bh) to drain before freeing the kwq ring.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        /* Wait for any reader still using the old ops pointer */
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}
11345
/*
 * Entry point used by the CNIC driver to discover this device's offload
 * capabilities: fills in the chip id, BAR mappings, context-table
 * geometry and driver callbacks, then returns the shared descriptor.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
11369
11370 #endif /* BCM_CNIC */
11371