/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-8"
#define DRV_MODULE_RELDATE      "2010/04/01"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

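/* One "go" doorbell register per DMAE command channel: writing 1 to
 * dmae_reg_go_c[idx] kicks off the command that was previously copied
 * into DMAE command memory slot idx (see bnx2x_post_dmae() below).
 */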
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

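/* DMA len32 dwords from host memory (dma_addr) to device GRC space
 * (dst_addr).  The DMAE engine writes DMAE_COMP_VAL to the slowpath
 * wb_comp dword on completion, which is polled for below; while DMAE
 * is not yet ready the function falls back to indirect register
 * writes.
 */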
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

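/* Counterpart of bnx2x_write_dmae(): DMA len32 dwords from device GRC
 * space (src_addr) into the slowpath wb_data buffer, again polling
 * wb_comp for the DMAE_COMP_VAL completion marker.
 */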
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

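/* Scan the assert lists of the four STORM processors (X/T/C/U) and
 * print every valid entry; returns the number of asserts found.  A
 * list is terminated by the first entry whose opcode reads as
 * COMMON_ASM_INVALID_ASSERT_OPCODE.
 */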
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

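/* Dump the MCP scratchpad trace buffer to the kernel log.  The dword
 * just below shmem_base holds the current write mark; the buffer is
 * printed in two wraps, from the mark up to shmem_base and then from
 * the start of the buffer up to the mark.
 */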
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

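/* Best-effort crash dump: freeze statistics, then print the driver's
 * view of every fastpath queue (status block indices, Rx/Tx ring
 * producers/consumers and a window of descriptors around each
 * consumer), followed by the FW trace and the STORM assert lists.
 */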
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

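/* Program the HC (host coalescing) block for the interrupt mode in
 * use: MSI-X, MSI or INTx.  Note the INTx path first writes the
 * config with the MSI/MSI-X enable bit set and then rewrites it with
 * that bit cleared; for E1H it also sets up the leading/trailing edge
 * attention registers.
 */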
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                /* returning a negative errno from a bool function would
                 * evaluate as true, falsely reporting the lock as taken */
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

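/* Acknowledge a status block to the IGU through the HC command
 * register: reports the last index the driver has seen for the given
 * storm; judging by the register field names, update selects whether
 * the index is updated and op selects the interrupt mode.
 */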
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speed up dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

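/* Tx BDs still available to start_xmit().  The NUM_TX_RINGS
 * "next-page" BDs can never carry packets, so they are counted as
 * permanently used; e.g. with prod == cons this returns
 * tx_ring_size - NUM_TX_RINGS rather than the full ring size.
 */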
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent reenabling the queue
                 * while it's empty. This could have happened if rx_action()
                 * gets suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

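/* Advance the SGE producer after a TPA completion.  sge_mask keeps
 * one bit per SGE entry (64 entries per u64 element); bits are
 * cleared as the FW consumes pages, and the producer may only jump
 * over mask elements that have gone completely to zero, i.e. whole
 * groups of 64 consumed entries, which are then re-armed to all-ones.
 */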
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

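/* Begin a TPA aggregation: the pre-allocated skb parked in
 * tpa_pool[queue] takes the ring slot at prod, while the skb that
 * just started aggregating (at cons) is parked in the pool, still
 * mapped, until bnx2x_tpa_stop() completes or drops the aggregation.
 */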
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

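/* Complete (or drop) a TPA aggregation: unmap the skb parked in the
 * pool, fix up its IP checksum, attach the SGE pages via
 * bnx2x_fill_frag_skb() and hand it to (vlan_)gro_receive(); a newly
 * allocated skb then refills the pool entry.  If either allocation
 * fails the packet is dropped but the bin stays usable.
 */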
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1499
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
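
     /*
      * Summary of the paths above: on success the aggregated skb goes
      * to GRO (VLAN-accelerated or not), on SGE page allocation failure
      * the half-built skb is freed, and on skb allocation failure the
      * old buffer is kept in the bin; in all cases the pool entry ends
      * up in the BNX2X_TPA_STOP state.
      */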
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable for weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes BDs must have buffers.
1533          */
1534         wmb();
1535
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
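
     /*
      * A sketch of the failure the wmb() above prevents: without it the
      * FW could observe the new bd_prod while the BD it covers still
      * holds a stale buffer address, since the FW may fetch a BD/SGE
      * immediately after seeing the producer advance.
      */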
1547
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549 {
1550         struct bnx2x *bp = fp->bp;
1551         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553         int rx_pkt = 0;
1554
1555 #ifdef BNX2X_STOP_ON_ERROR
1556         if (unlikely(bp->panic))
1557                 return 0;
1558 #endif
1559
1560         /* The CQ "next" element has the same size as a regular element,
1561            so it is safe to advance past it here */
1562         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564                 hw_comp_cons++;
1565
1566         bd_cons = fp->rx_bd_cons;
1567         bd_prod = fp->rx_bd_prod;
1568         bd_prod_fw = bd_prod;
1569         sw_comp_cons = fp->rx_comp_cons;
1570         sw_comp_prod = fp->rx_comp_prod;
1571
1572         /* Memory barrier necessary as speculative reads of the rx
1573          * buffer can be ahead of the index in the status block
1574          */
1575         rmb();
1576
1577         DP(NETIF_MSG_RX_STATUS,
1578            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1579            fp->index, hw_comp_cons, sw_comp_cons);
1580
1581         while (sw_comp_cons != hw_comp_cons) {
1582                 struct sw_rx_bd *rx_buf = NULL;
1583                 struct sk_buff *skb;
1584                 union eth_rx_cqe *cqe;
1585                 u8 cqe_fp_flags;
1586                 u16 len, pad;
1587
1588                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589                 bd_prod = RX_BD(bd_prod);
1590                 bd_cons = RX_BD(bd_cons);
1591
1592                 /* Prefetch the page containing the BD descriptor
1593                    at the producer's index. It will be needed when a
1594                    new skb is allocated */
1595                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596                                              (&fp->rx_desc_ring[bd_prod])) -
1597                                   PAGE_SIZE + 1));
1598
1599                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601
1602                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1603                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1604                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1605                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1606                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1607                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1608
1609                 /* is this a slowpath msg? */
1610                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1611                         bnx2x_sp_event(fp, cqe);
1612                         goto next_cqe;
1613
1614                 /* this is an rx packet */
1615                 } else {
1616                         rx_buf = &fp->rx_buf_ring[bd_cons];
1617                         skb = rx_buf->skb;
1618                         prefetch(skb);
1619                         prefetch((u8 *)skb + 256);
1620                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621                         pad = cqe->fast_path_cqe.placement_offset;
1622
1623                         /* If CQE is marked both TPA_START and TPA_END
1624                            it is a non-TPA CQE */
1625                         if ((!fp->disable_tpa) &&
1626                             (TPA_TYPE(cqe_fp_flags) !=
1627                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1628                                 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631                                         DP(NETIF_MSG_RX_STATUS,
1632                                            "calling tpa_start on queue %d\n",
1633                                            queue);
1634
1635                                         bnx2x_tpa_start(fp, queue, skb,
1636                                                         bd_cons, bd_prod);
1637                                         goto next_rx;
1638                                 }
1639
1640                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641                                         DP(NETIF_MSG_RX_STATUS,
1642                                            "calling tpa_stop on queue %d\n",
1643                                            queue);
1644
1645                                         if (!BNX2X_RX_SUM_FIX(cqe))
1646                                                 BNX2X_ERR("STOP on non-TCP "
1647                                                           "data\n");
1648
1649                                         /* This is the size of the linear
1650                                            data on this skb */
1651                                         len = le16_to_cpu(cqe->fast_path_cqe.
1652                                                                 len_on_bd);
1653                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1654                                                     len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1656                                         if (bp->panic)
1657                                                 return 0;
1658 #endif
1659
1660                                         bnx2x_update_sge_prod(fp,
1661                                                         &cqe->fast_path_cqe);
1662                                         goto next_cqe;
1663                                 }
1664                         }
1665
1666                         dma_sync_single_for_device(&bp->pdev->dev,
1667                                         dma_unmap_addr(rx_buf, mapping),
1668                                                    pad + RX_COPY_THRESH,
1669                                                    DMA_FROM_DEVICE);
1670                         prefetch(skb);
1671                         prefetch(((char *)(skb)) + 128);
1672
1673                         /* is this an error packet? */
1674                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1675                                 DP(NETIF_MSG_RX_ERR,
1676                                    "ERROR  flags %x  rx packet %u\n",
1677                                    cqe_fp_flags, sw_comp_cons);
1678                                 fp->eth_q_stats.rx_err_discard_pkt++;
1679                                 goto reuse_rx;
1680                         }
1681
1682                         /* Since we don't have a jumbo ring,
1683                          * copy small packets if the MTU is above 1500
1684                          */
1685                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1686                             (len <= RX_COPY_THRESH)) {
1687                                 struct sk_buff *new_skb;
1688
1689                                 new_skb = netdev_alloc_skb(bp->dev,
1690                                                            len + pad);
1691                                 if (new_skb == NULL) {
1692                                         DP(NETIF_MSG_RX_ERR,
1693                                            "ERROR  packet dropped "
1694                                            "because of alloc failure\n");
1695                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1696                                         goto reuse_rx;
1697                                 }
1698
1699                                 /* aligned copy */
1700                                 skb_copy_from_linear_data_offset(skb, pad,
1701                                                     new_skb->data + pad, len);
1702                                 skb_reserve(new_skb, pad);
1703                                 skb_put(new_skb, len);
1704
1705                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1706
1707                                 skb = new_skb;
1708
1709                         } else
1710                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1711                                 dma_unmap_single(&bp->pdev->dev,
1712                                         dma_unmap_addr(rx_buf, mapping),
1713                                                  bp->rx_buf_size,
1714                                                  DMA_FROM_DEVICE);
1715                                 skb_reserve(skb, pad);
1716                                 skb_put(skb, len);
1717
1718                         } else {
1719                                 DP(NETIF_MSG_RX_ERR,
1720                                    "ERROR  packet dropped because "
1721                                    "of alloc failure\n");
1722                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1723 reuse_rx:
1724                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1725                                 goto next_rx;
1726                         }
1727
1728                         skb->protocol = eth_type_trans(skb, bp->dev);
1729
1730                         skb->ip_summed = CHECKSUM_NONE;
1731                         if (bp->rx_csum) {
1732                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1733                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1734                                 else
1735                                         fp->eth_q_stats.hw_csum_err++;
1736                         }
1737                 }
1738
1739                 skb_record_rx_queue(skb, fp->index);
1740
1741 #ifdef BCM_VLAN
1742                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1743                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1744                      PARSING_FLAGS_VLAN))
1745                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1746                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1747                 else
1748 #endif
1749                         napi_gro_receive(&fp->napi, skb);
1750
1751
1752 next_rx:
1753                 rx_buf->skb = NULL;
1754
1755                 bd_cons = NEXT_RX_IDX(bd_cons);
1756                 bd_prod = NEXT_RX_IDX(bd_prod);
1757                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1758                 rx_pkt++;
1759 next_cqe:
1760                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1761                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1762
1763                 if (rx_pkt == budget)
1764                         break;
1765         } /* while */
1766
1767         fp->rx_bd_cons = bd_cons;
1768         fp->rx_bd_prod = bd_prod_fw;
1769         fp->rx_comp_cons = sw_comp_cons;
1770         fp->rx_comp_prod = sw_comp_prod;
1771
1772         /* Update producers */
1773         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1774                              fp->rx_sge_prod);
1775
1776         fp->rx_pkt += rx_pkt;
1777         fp->rx_calls++;
1778
1779         return rx_pkt;
1780 }
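
     /*
      * Typical NAPI use of the return value (a sketch - the actual poll
      * routine lives elsewhere in this file):
      *
      *      work_done = bnx2x_rx_int(fp, budget);
      *      if (work_done < budget)
      *              napi_complete(napi);
      *
      * i.e. consuming less than the full budget lets the poller
      * complete NAPI and re-enable interrupts, while returning exactly
      * 'budget' keeps the queue on the poll list.
      */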
1781
1782 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1783 {
1784         struct bnx2x_fastpath *fp = fp_cookie;
1785         struct bnx2x *bp = fp->bp;
1786
1787         /* Return here if interrupt is disabled */
1788         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1790                 return IRQ_HANDLED;
1791         }
1792
1793         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1794            fp->index, fp->sb_id);
1795         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1796
1797 #ifdef BNX2X_STOP_ON_ERROR
1798         if (unlikely(bp->panic))
1799                 return IRQ_HANDLED;
1800 #endif
1801
1802         /* Handle Rx and Tx according to MSI-X vector */
1803         prefetch(fp->rx_cons_sb);
1804         prefetch(fp->tx_cons_sb);
1805         prefetch(&fp->status_blk->u_status_block.status_block_index);
1806         prefetch(&fp->status_blk->c_status_block.status_block_index);
1807         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1808
1809         return IRQ_HANDLED;
1810 }
1811
1812 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1813 {
1814         struct bnx2x *bp = netdev_priv(dev_instance);
1815         u16 status = bnx2x_ack_int(bp);
1816         u16 mask;
1817         int i;
1818
1819         /* Return here if interrupt is shared and it's not for us */
1820         if (unlikely(status == 0)) {
1821                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1822                 return IRQ_NONE;
1823         }
1824         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1825
1826         /* Return here if interrupt is disabled */
1827         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1829                 return IRQ_HANDLED;
1830         }
1831
1832 #ifdef BNX2X_STOP_ON_ERROR
1833         if (unlikely(bp->panic))
1834                 return IRQ_HANDLED;
1835 #endif
1836
1837         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838                 struct bnx2x_fastpath *fp = &bp->fp[i];
1839
1840                 mask = 0x2 << fp->sb_id;
1841                 if (status & mask) {
1842                         /* Handle Rx and Tx according to SB id */
1843                         prefetch(fp->rx_cons_sb);
1844                         prefetch(&fp->status_blk->u_status_block.
1845                                                 status_block_index);
1846                         prefetch(fp->tx_cons_sb);
1847                         prefetch(&fp->status_blk->c_status_block.
1848                                                 status_block_index);
1849                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1850                         status &= ~mask;
1851                 }
1852         }
1853
1854 #ifdef BCM_CNIC
1855         mask = 0x2 << CNIC_SB_ID(bp);
1856         if (status & (mask | 0x1)) {
1857                 struct cnic_ops *c_ops = NULL;
1858
1859                 rcu_read_lock();
1860                 c_ops = rcu_dereference(bp->cnic_ops);
1861                 if (c_ops)
1862                         c_ops->cnic_handler(bp->cnic_data, NULL);
1863                 rcu_read_unlock();
1864
1865                 status &= ~mask;
1866         }
1867 #endif
1868
1869         if (unlikely(status & 0x1)) {
1870                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1871
1872                 status &= ~0x1;
1873                 if (!status)
1874                         return IRQ_HANDLED;
1875         }
1876
1877         if (unlikely(status))
1878                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1879                    status);
1880
1881         return IRQ_HANDLED;
1882 }
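
     /*
      * Status word layout as decoded above: bit 0 signals the default
      * (slowpath) status block, handled via sp_task, while each
      * fastpath SB owns bit (0x2 << sb_id) - e.g. sb_id 0 maps to bit 1
      * and sb_id 3 to bit 4. Whatever is still set afterwards is
      * reported as an unknown interrupt.
      */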
1883
1884 /* end of fast path */
1885
1886 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1887
1888 /* Link */
1889
1890 /*
1891  * General service functions
1892  */
1893
1894 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1895 {
1896         u32 lock_status;
1897         u32 resource_bit = (1 << resource);
1898         int func = BP_FUNC(bp);
1899         u32 hw_lock_control_reg;
1900         int cnt;
1901
1902         /* Validating that the resource is within range */
1903         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1904                 DP(NETIF_MSG_HW,
1905                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1907                 return -EINVAL;
1908         }
1909
1910         if (func <= 5) {
1911                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1912         } else {
1913                 hw_lock_control_reg =
1914                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1915         }
1916
1917         /* Validating that the resource is not already taken */
1918         lock_status = REG_RD(bp, hw_lock_control_reg);
1919         if (lock_status & resource_bit) {
1920                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1921                    lock_status, resource_bit);
1922                 return -EEXIST;
1923         }
1924
1925         /* Try for 5 seconds, every 5ms */
1926         for (cnt = 0; cnt < 1000; cnt++) {
1927                 /* Try to acquire the lock */
1928                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1929                 lock_status = REG_RD(bp, hw_lock_control_reg);
1930                 if (lock_status & resource_bit)
1931                         return 0;
1932
1933                 msleep(5);
1934         }
1935         DP(NETIF_MSG_HW, "Timeout\n");
1936         return -EAGAIN;
1937 }
1938
1939 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1940 {
1941         u32 lock_status;
1942         u32 resource_bit = (1 << resource);
1943         int func = BP_FUNC(bp);
1944         u32 hw_lock_control_reg;
1945
1946         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1947
1948         /* Validating that the resource is within range */
1949         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1950                 DP(NETIF_MSG_HW,
1951                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1953                 return -EINVAL;
1954         }
1955
1956         if (func <= 5) {
1957                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1958         } else {
1959                 hw_lock_control_reg =
1960                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1961         }
1962
1963         /* Validating that the resource is currently taken */
1964         lock_status = REG_RD(bp, hw_lock_control_reg);
1965         if (!(lock_status & resource_bit)) {
1966                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1967                    lock_status, resource_bit);
1968                 return -EFAULT;
1969         }
1970
1971         REG_WR(bp, hw_lock_control_reg, resource_bit);
1972         return 0;
1973 }
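
     /*
      * As the pair of functions above shows, the HW lock register works
      * set/clear style: writing the resource bit to
      * hw_lock_control_reg + 4 tries to take the lock (confirmed by
      * reading it back from the base register), and writing the same
      * bit to the base register releases it.
      */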
1974
1975 /* HW Lock for shared dual port PHYs */
1976 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1977 {
1978         mutex_lock(&bp->port.phy_mutex);
1979
1980         if (bp->port.need_hw_lock)
1981                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1982 }
1983
1984 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1985 {
1986         if (bp->port.need_hw_lock)
1987                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1988
1989         mutex_unlock(&bp->port.phy_mutex);
1990 }
1991
1992 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1993 {
1994         /* The GPIO should be swapped if swap register is set and active */
1995         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997         int gpio_shift = gpio_num +
1998                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999         u32 gpio_mask = (1 << gpio_shift);
2000         u32 gpio_reg;
2001         int value;
2002
2003         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2005                 return -EINVAL;
2006         }
2007
2008         /* read GPIO value */
2009         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2010
2011         /* get the requested pin value */
2012         if ((gpio_reg & gpio_mask) == gpio_mask)
2013                 value = 1;
2014         else
2015                 value = 0;
2016
2017         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2018
2019         return value;
2020 }
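
     /*
      * Shift example for the pin lookup above: with no active port
      * swap, port 0 reads bit gpio_num directly while port 1 reads bit
      * (gpio_num + MISC_REGISTERS_GPIO_PORT_SHIFT); an active swap
      * strap flips which bank a given port uses.
      */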
2021
2022 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2023 {
2024         /* The GPIO should be swapped if swap register is set and active */
2025         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2026                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2027         int gpio_shift = gpio_num +
2028                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029         u32 gpio_mask = (1 << gpio_shift);
2030         u32 gpio_reg;
2031
2032         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2034                 return -EINVAL;
2035         }
2036
2037         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2038         /* read GPIO and mask out all but the FLOAT bits */
2039         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2040
2041         switch (mode) {
2042         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044                    gpio_num, gpio_shift);
2045                 /* clear FLOAT and set CLR */
2046                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2048                 break;
2049
2050         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052                    gpio_num, gpio_shift);
2053                 /* clear FLOAT and set SET */
2054                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2056                 break;
2057
2058         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2059                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060                    gpio_num, gpio_shift);
2061                 /* set FLOAT */
2062                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2063                 break;
2064
2065         default:
2066                 break;
2067         }
2068
2069         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2070         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2071
2072         return 0;
2073 }
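
     /*
      * Usage sketch, using only constants already handled above -
      * driving a pin low and later floating it again:
      *
      *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
      *                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
      *      ...
      *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
      *                     MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
      *
      * The GPIO HW lock serializes this against other functions.
      */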
2074
2075 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2076 {
2077         /* The GPIO should be swapped if swap register is set and active */
2078         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080         int gpio_shift = gpio_num +
2081                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082         u32 gpio_mask = (1 << gpio_shift);
2083         u32 gpio_reg;
2084
2085         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2087                 return -EINVAL;
2088         }
2089
2090         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2091         /* read GPIO int */
2092         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2093
2094         switch (mode) {
2095         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097                                    "output low\n", gpio_num, gpio_shift);
2098                 /* clear SET and set CLR */
2099                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2101                 break;
2102
2103         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105                                    "output high\n", gpio_num, gpio_shift);
2106                 /* clear CLR and set SET */
2107                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2109                 break;
2110
2111         default:
2112                 break;
2113         }
2114
2115         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2117
2118         return 0;
2119 }
2120
2121 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2122 {
2123         u32 spio_mask = (1 << spio_num);
2124         u32 spio_reg;
2125
2126         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127             (spio_num > MISC_REGISTERS_SPIO_7)) {
2128                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2129                 return -EINVAL;
2130         }
2131
2132         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133         /* read SPIO and mask out all but the FLOAT bits */
2134         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2135
2136         switch (mode) {
2137         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2138                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139                 /* clear FLOAT and set CLR */
2140                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2142                 break;
2143
2144         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2145                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146                 /* clear FLOAT and set SET */
2147                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2149                 break;
2150
2151         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2153                 /* set FLOAT */
2154                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2155                 break;
2156
2157         default:
2158                 break;
2159         }
2160
2161         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2162         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2163
2164         return 0;
2165 }
2166
2167 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2168 {
2169         switch (bp->link_vars.ieee_fc &
2170                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2171         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2172                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2173                                           ADVERTISED_Pause);
2174                 break;
2175
2176         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2177                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2178                                          ADVERTISED_Pause);
2179                 break;
2180
2181         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2182                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2183                 break;
2184
2185         default:
2186                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2187                                           ADVERTISED_Pause);
2188                 break;
2189         }
2190 }
2191
2192 static void bnx2x_link_report(struct bnx2x *bp)
2193 {
2194         if (bp->flags & MF_FUNC_DIS) {
2195                 netif_carrier_off(bp->dev);
2196                 netdev_err(bp->dev, "NIC Link is Down\n");
2197                 return;
2198         }
2199
2200         if (bp->link_vars.link_up) {
2201                 u16 line_speed;
2202
2203                 if (bp->state == BNX2X_STATE_OPEN)
2204                         netif_carrier_on(bp->dev);
2205                 netdev_info(bp->dev, "NIC Link is Up, ");
2206
2207                 line_speed = bp->link_vars.line_speed;
2208                 if (IS_E1HMF(bp)) {
2209                         u16 vn_max_rate;
2210
2211                         vn_max_rate =
2212                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214                         if (vn_max_rate < line_speed)
2215                                 line_speed = vn_max_rate;
2216                 }
2217                 pr_cont("%d Mbps ", line_speed);
2218
2219                 if (bp->link_vars.duplex == DUPLEX_FULL)
2220                         pr_cont("full duplex");
2221                 else
2222                         pr_cont("half duplex");
2223
2224                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2226                                 pr_cont(", receive ");
2227                                 if (bp->link_vars.flow_ctrl &
2228                                     BNX2X_FLOW_CTRL_TX)
2229                                         pr_cont("& transmit ");
2230                         } else {
2231                                 pr_cont(", transmit ");
2232                         }
2233                         pr_cont("flow control ON");
2234                 }
2235                 pr_cont("\n");
2236
2237         } else { /* link_down */
2238                 netif_carrier_off(bp->dev);
2239                 netdev_err(bp->dev, "NIC Link is Down\n");
2240         }
2241 }
2242
2243 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2244 {
2245         if (!BP_NOMCP(bp)) {
2246                 u8 rc;
2247
2248                 /* Initialize link parameters structure variables */
2249                 /* It is recommended to turn off RX FC for jumbo frames
2250                    for better performance */
2251                 if (bp->dev->mtu > 5000)
2252                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2253                 else
2254                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2255
2256                 bnx2x_acquire_phy_lock(bp);
2257
2258                 if (load_mode == LOAD_DIAG)
2259                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2260
2261                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2262
2263                 bnx2x_release_phy_lock(bp);
2264
2265                 bnx2x_calc_fc_adv(bp);
2266
2267                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2269                         bnx2x_link_report(bp);
2270                 }
2271
2272                 return rc;
2273         }
2274         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2275         return -EINVAL;
2276 }
2277
2278 static void bnx2x_link_set(struct bnx2x *bp)
2279 {
2280         if (!BP_NOMCP(bp)) {
2281                 bnx2x_acquire_phy_lock(bp);
2282                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2283                 bnx2x_release_phy_lock(bp);
2284
2285                 bnx2x_calc_fc_adv(bp);
2286         } else
2287                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2288 }
2289
2290 static void bnx2x__link_reset(struct bnx2x *bp)
2291 {
2292         if (!BP_NOMCP(bp)) {
2293                 bnx2x_acquire_phy_lock(bp);
2294                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2295                 bnx2x_release_phy_lock(bp);
2296         } else
2297                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2298 }
2299
2300 static u8 bnx2x_link_test(struct bnx2x *bp)
2301 {
2302         u8 rc = 0;
2303
2304         if (!BP_NOMCP(bp)) {
2305                 bnx2x_acquire_phy_lock(bp);
2306                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307                 bnx2x_release_phy_lock(bp);
2308         } else
2309                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2310
2311         return rc;
2312 }
2313
2314 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2315 {
2316         u32 r_param = bp->link_vars.line_speed / 8;
2317         u32 fair_periodic_timeout_usec;
2318         u32 t_fair;
2319
2320         memset(&(bp->cmng.rs_vars), 0,
2321                sizeof(struct rate_shaping_vars_per_port));
2322         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2323
2324         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2326
2327         /* this is the threshold below which no timer arming will occur.
2328            The 1.25 coefficient makes the threshold a little bigger than
2329            the real time, to compensate for timer inaccuracy */
2330         bp->cmng.rs_vars.rs_threshold =
2331                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2332
2333         /* resolution of fairness timer */
2334         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2336         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2337
2338         /* this is the threshold below which we won't arm the timer anymore */
2339         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2340
2341         /* we multiply by 1e3/8 to get bytes/msec.
2342            We don't want the credits to exceed
2343            t_fair*FAIR_MEM (the algorithm resolution) */
2344         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345         /* since each tick is 4 usec */
2346         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2347 }
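
     /*
      * Plugging in numbers (assuming the values implied by the comments
      * above, i.e. RS_PERIODIC_TIMEOUT_USEC == 100, on a 10G link):
      * r_param = 10000/8 = 1250 bytes/usec, rs_threshold =
      * 100 * 1250 * 5/4 = 156250 bytes, and t_fair = 1000 usec,
      * matching the "for 10G" note above.
      */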
2348
2349 /* Calculates the sum of vn_min_rates.
2350    It's needed for further normalizing of the min_rates.
2351    Returns:
2352      sum of vn_min_rates.
2353        or
2354      0 - if all the min_rates are 0.
2355      In the latter case the fairness algorithm should be deactivated.
2356      If not all min_rates are zero then those that are zeroes will be set to 1.
2357  */
2358 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2359 {
2360         int all_zero = 1;
2361         int port = BP_PORT(bp);
2362         int vn;
2363
2364         bp->vn_weight_sum = 0;
2365         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366                 int func = 2*vn + port;
2367                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2370
2371                 /* Skip hidden vns */
2372                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2373                         continue;
2374
2375                 /* If min rate is zero - set it to 1 */
2376                 if (!vn_min_rate)
2377                         vn_min_rate = DEF_MIN_RATE;
2378                 else
2379                         all_zero = 0;
2380
2381                 bp->vn_weight_sum += vn_min_rate;
2382         }
2383
2384         /* ... only if all min rates are zeros - disable fairness */
2385         if (all_zero) {
2386                 bp->cmng.flags.cmng_enables &=
2387                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes - "
2389                    "fairness will be disabled\n");
2390         } else
2391                 bp->cmng.flags.cmng_enables |=
2392                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393 }
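
     /*
      * Example of the normalization above (hypothetical configuration):
      * with per-VN min rates {0, 2000, 3000, 0} Mbps and no hidden VNs,
      * the two zeroes are bumped to DEF_MIN_RATE, vn_weight_sum becomes
      * 5000 + 2*DEF_MIN_RATE and fairness stays enabled; only an
      * all-zero configuration disables it.
      */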
2394
2395 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2396 {
2397         struct rate_shaping_vars_per_vn m_rs_vn;
2398         struct fairness_vars_per_vn m_fair_vn;
2399         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400         u16 vn_min_rate, vn_max_rate;
2401         int i;
2402
2403         /* If function is hidden - set min and max to zeroes */
2404         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2405                 vn_min_rate = 0;
2406                 vn_max_rate = 0;
2407
2408         } else {
2409                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2411                 /* If min rate is zero - set it to 1 */
2412                 if (!vn_min_rate)
2413                         vn_min_rate = DEF_MIN_RATE;
2414                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2416         }
2417         DP(NETIF_MSG_IFUP,
2418            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2419            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2420
2421         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2423
2424         /* global vn counter - maximal Mbps for this vn */
2425         m_rs_vn.vn_counter.rate = vn_max_rate;
2426
2427         /* quota - number of bytes transmitted in this period */
2428         m_rs_vn.vn_counter.quota =
2429                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2430
2431         if (bp->vn_weight_sum) {
2432                 /* credit for each period of the fairness algorithm:
2433                    number of bytes in T_FAIR (the VNs share the port rate).
2434                    vn_weight_sum should not be larger than 10000, thus
2435                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2436                    than zero */
2437                 m_fair_vn.vn_credit_delta =
2438                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439                                                    (8 * bp->vn_weight_sum))),
2440                               (bp->cmng.fair_vars.fair_threshold * 2));
2441                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2442                    m_fair_vn.vn_credit_delta);
2443         }
2444
2445         /* Store it to internal memory */
2446         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449                        ((u32 *)(&m_rs_vn))[i]);
2450
2451         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454                        ((u32 *)(&m_fair_vn))[i]);
2455 }
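
     /*
      * Quota arithmetic example (assuming RS_PERIODIC_TIMEOUT_USEC ==
      * 100, as above): a VN capped at vn_max_rate = 2500 Mbps, i.e.
      * 2500 bits/usec, gets quota = 2500 * 100 / 8 = 31250 bytes per
      * rate-shaping period.
      */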
2456
2457
2458 /* This function is called upon link interrupt */
2459 static void bnx2x_link_attn(struct bnx2x *bp)
2460 {
2461         /* Make sure that we are synced with the current statistics */
2462         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463
2464         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2465
2466         if (bp->link_vars.link_up) {
2467
2468                 /* dropless flow control */
2469                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2470                         int port = BP_PORT(bp);
2471                         u32 pause_enabled = 0;
2472
2473                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2474                                 pause_enabled = 1;
2475
2476                         REG_WR(bp, BAR_USTRORM_INTMEM +
2477                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2478                                pause_enabled);
2479                 }
2480
2481                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2482                         struct host_port_stats *pstats;
2483
2484                         pstats = bnx2x_sp(bp, port_stats);
2485                         /* reset old bmac stats */
2486                         memset(&(pstats->mac_stx[0]), 0,
2487                                sizeof(struct mac_stx));
2488                 }
2489                 if (bp->state == BNX2X_STATE_OPEN)
2490                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491         }
2492
2493         /* indicate link status */
2494         bnx2x_link_report(bp);
2495
2496         if (IS_E1HMF(bp)) {
2497                 int port = BP_PORT(bp);
2498                 int func;
2499                 int vn;
2500
2501                 /* Set the attention towards other drivers on the same port */
2502                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2503                         if (vn == BP_E1HVN(bp))
2504                                 continue;
2505
2506                         func = ((vn << 1) | port);
2507                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2508                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2509                 }
2510
2511                 if (bp->link_vars.link_up) {
2512                         int i;
2513
2514                         /* Init rate shaping and fairness contexts */
2515                         bnx2x_init_port_minmax(bp);
2516
2517                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2518                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2519
2520                         /* Store it to internal memory */
2521                         for (i = 0;
2522                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2523                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2524                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2525                                        ((u32 *)(&bp->cmng))[i]);
2526                 }
2527         }
2528 }
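
     /*
      * Function encoding example for the attention loop above: on
      * port 1, notifying VN 2 targets func = (2 << 1) | 1 = 5, i.e.
      * general attention LINK_SYNC_ATTENTION_BIT_FUNC_0 + 5, each
      * attention taking 4 bytes in the AEU register block.
      */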
2529
2530 static void bnx2x__link_status_update(struct bnx2x *bp)
2531 {
2532         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2533                 return;
2534
2535         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2536
2537         if (bp->link_vars.link_up)
2538                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2539         else
2540                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2541
2542         bnx2x_calc_vn_weight_sum(bp);
2543
2544         /* indicate link status */
2545         bnx2x_link_report(bp);
2546 }
2547
2548 static void bnx2x_pmf_update(struct bnx2x *bp)
2549 {
2550         int port = BP_PORT(bp);
2551         u32 val;
2552
2553         bp->port.pmf = 1;
2554         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2555
2556         /* enable nig attention */
2557         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2558         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2559         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2560
2561         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2562 }
2563
2564 /* end of Link */
2565
2566 /* slow path */
2567
2568 /*
2569  * General service functions
2570  */
2571
2572 /* send the MCP a request, block until there is a reply */
2573 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2574 {
2575         int func = BP_FUNC(bp);
2576         u32 seq = ++bp->fw_seq;
2577         u32 rc = 0;
2578         u32 cnt = 1;
2579         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2580
2581         mutex_lock(&bp->fw_mb_mutex);
2582         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2583         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2584
2585         do {
2586                 /* let the FW do its magic ... */
2587                 msleep(delay);
2588
2589                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2590
2591                 /* Give the FW up to 5 seconds (500*10ms) */
2592         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2593
2594         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2595            cnt*delay, rc, seq);
2596
2597         /* is this a reply to our command? */
2598         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2599                 rc &= FW_MSG_CODE_MASK;
2600         else {
2601                 /* FW BUG! */
2602                 BNX2X_ERR("FW failed to respond!\n");
2603                 bnx2x_fw_dump(bp);
2604                 rc = 0;
2605         }
2606         mutex_unlock(&bp->fw_mb_mutex);
2607
2608         return rc;
2609 }
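
     /*
      * Replies are matched by sequence number: each call bumps
      * bp->fw_seq, ORs it into the command word and polls until the FW
      * echoes the same sequence back; see the DCC handler below for a
      * caller, e.g. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK).
      */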
2610
2611 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2612 static void bnx2x_set_rx_mode(struct net_device *dev);
2613
2614 static void bnx2x_e1h_disable(struct bnx2x *bp)
2615 {
2616         int port = BP_PORT(bp);
2617
2618         netif_tx_disable(bp->dev);
2619
2620         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2622         netif_carrier_off(bp->dev);
2623 }
2624
2625 static void bnx2x_e1h_enable(struct bnx2x *bp)
2626 {
2627         int port = BP_PORT(bp);
2628
2629         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2631         /* Tx queues should only be re-enabled */
2632         netif_tx_wake_all_queues(bp->dev);
2633
2634         /*
2635          * Should not call netif_carrier_on() here, since it will be
2636          * called during the link state check if the link is up
2637          */
2638 }
2639
2640 static void bnx2x_update_min_max(struct bnx2x *bp)
2641 {
2642         int port = BP_PORT(bp);
2643         int vn, i;
2644
2645         /* Init rate shaping and fairness contexts */
2646         bnx2x_init_port_minmax(bp);
2647
2648         bnx2x_calc_vn_weight_sum(bp);
2649
2650         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2651                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2652
2653         if (bp->port.pmf) {
2654                 int func;
2655
2656                 /* Set the attention towards other drivers on the same port */
2657                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2658                         if (vn == BP_E1HVN(bp))
2659                                 continue;
2660
2661                         func = ((vn << 1) | port);
2662                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2663                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2664                 }
2665
2666                 /* Store it to internal memory */
2667                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2668                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2669                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2670                                ((u32 *)(&bp->cmng))[i]);
2671         }
2672 }
2673
2674 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2675 {
2676         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2677
2678         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2679
2680                 /*
2681                  * This is the only place besides the function initialization
2682                  * where the bp->flags can change so it is done without any
2683                  * locks
2684                  */
2685                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2686                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2687                         bp->flags |= MF_FUNC_DIS;
2688
2689                         bnx2x_e1h_disable(bp);
2690                 } else {
2691                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2692                         bp->flags &= ~MF_FUNC_DIS;
2693
2694                         bnx2x_e1h_enable(bp);
2695                 }
2696                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2697         }
2698         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2699
2700                 bnx2x_update_min_max(bp);
2701                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2702         }
2703
2704         /* Report results to MCP */
2705         if (dcc_event)
2706                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2707         else
2708                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2709 }
2710
2711 /* must be called under the spq lock */
2712 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2713 {
2714         struct eth_spe *next_spe = bp->spq_prod_bd;
2715
2716         if (bp->spq_prod_bd == bp->spq_last_bd) {
2717                 bp->spq_prod_bd = bp->spq;
2718                 bp->spq_prod_idx = 0;
2719                 DP(NETIF_MSG_TIMER, "end of spq\n");
2720         } else {
2721                 bp->spq_prod_bd++;
2722                 bp->spq_prod_idx++;
2723         }
2724         return next_spe;
2725 }
2726
2727 /* must be called under the spq lock */
2728 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2729 {
2730         int func = BP_FUNC(bp);
2731
2732         /* Make sure that BD data is updated before writing the producer */
2733         wmb();
2734
2735         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2736                bp->spq_prod_idx);
2737         mmiowb();
2738 }
2739
2740 /* the slow path queue is odd since completions arrive on the fastpath ring */
2741 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2742                          u32 data_hi, u32 data_lo, int common)
2743 {
2744         struct eth_spe *spe;
2745
2746 #ifdef BNX2X_STOP_ON_ERROR
2747         if (unlikely(bp->panic))
2748                 return -EIO;
2749 #endif
2750
2751         spin_lock_bh(&bp->spq_lock);
2752
2753         if (!bp->spq_left) {
2754                 BNX2X_ERR("BUG! SPQ ring full!\n");
2755                 spin_unlock_bh(&bp->spq_lock);
2756                 bnx2x_panic();
2757                 return -EBUSY;
2758         }
2759
2760         spe = bnx2x_sp_get_next(bp);
2761
2762         /* CID needs the port number to be encoded in it */
2763         spe->hdr.conn_and_cmd_data =
2764                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2765                                     HW_CID(bp, cid));
2766         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2767         if (common)
2768                 spe->hdr.type |=
2769                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2770
2771         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2772         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2773
2774         bp->spq_left--;
2775
2776         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2778            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2779            (u32)(U64_LO(bp->spq_mapping) +
2780            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2781            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2782
2783         bnx2x_sp_prod_update(bp);
2784         spin_unlock_bh(&bp->spq_lock);
2785         return 0;
2786 }
2787
2788 /* acquire split MCP access lock register */
2789 static int bnx2x_acquire_alr(struct bnx2x *bp)
2790 {
2791         u32 j, val;
2792         int rc = 0;
2793
2794         might_sleep();
2795         for (j = 0; j < 1000; j++) {
2796                 val = (1UL << 31);
2797                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799                 if (val & (1L << 31))
2800                         break;
2801
2802                 msleep(5);
2803         }
2804         if (!(val & (1L << 31))) {
2805                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2806                 rc = -EBUSY;
2807         }
2808
2809         return rc;
2810 }
2811
2812 /* release split MCP access lock register */
2813 static void bnx2x_release_alr(struct bnx2x *bp)
2814 {
2815         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2816 }
2817
2818 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2819 {
2820         struct host_def_status_block *def_sb = bp->def_status_blk;
2821         u16 rc = 0;
2822
2823         barrier(); /* status block is written to by the chip */
2824         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2825                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2826                 rc |= 1;
2827         }
2828         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2829                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2830                 rc |= 2;
2831         }
2832         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2833                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2834                 rc |= 4;
2835         }
2836         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2837                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2838                 rc |= 8;
2839         }
2840         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2841                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2842                 rc |= 16;
2843         }
2844         return rc;
2845 }
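/* The return value is a bitmask of which default status block indices
 * changed since the last poll: bit 0 - attention bits, bit 1 - CStorm,
 * bit 2 - UStorm, bit 3 - XStorm, bit 4 - TStorm.  bnx2x_sp_task()
 * below consumes bits 0 (HW attentions) and 1 (CStorm STAT_QUERY).
 */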
2846
2847 /*
2848  * slow path service functions
2849  */
2850
2851 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2852 {
2853         int port = BP_PORT(bp);
2854         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2855                        COMMAND_REG_ATTN_BITS_SET);
2856         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2857                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2858         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2859                                        NIG_REG_MASK_INTERRUPT_PORT0;
2860         u32 aeu_mask;
2861         u32 nig_mask = 0;
2862
2863         if (bp->attn_state & asserted)
2864                 BNX2X_ERR("IGU ERROR\n");
2865
2866         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867         aeu_mask = REG_RD(bp, aeu_addr);
2868
2869         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2870            aeu_mask, asserted);
2871         aeu_mask &= ~(asserted & 0x3ff);
2872         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2873
2874         REG_WR(bp, aeu_addr, aeu_mask);
2875         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2876
2877         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2878         bp->attn_state |= asserted;
2879         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2880
2881         if (asserted & ATTN_HARD_WIRED_MASK) {
2882                 if (asserted & ATTN_NIG_FOR_FUNC) {
2883
2884                         bnx2x_acquire_phy_lock(bp);
2885
2886                         /* save nig interrupt mask */
2887                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2888                         REG_WR(bp, nig_int_mask_addr, 0);
2889
2890                         bnx2x_link_attn(bp);
2891
2892                         /* handle unicore attn? */
2893                 }
2894                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897                 if (asserted & GPIO_2_FUNC)
2898                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900                 if (asserted & GPIO_3_FUNC)
2901                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903                 if (asserted & GPIO_4_FUNC)
2904                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906                 if (port == 0) {
2907                         if (asserted & ATTN_GENERAL_ATTN_1) {
2908                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910                         }
2911                         if (asserted & ATTN_GENERAL_ATTN_2) {
2912                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914                         }
2915                         if (asserted & ATTN_GENERAL_ATTN_3) {
2916                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918                         }
2919                 } else {
2920                         if (asserted & ATTN_GENERAL_ATTN_4) {
2921                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923                         }
2924                         if (asserted & ATTN_GENERAL_ATTN_5) {
2925                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927                         }
2928                         if (asserted & ATTN_GENERAL_ATTN_6) {
2929                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931                         }
2932                 }
2933
2934         } /* if hardwired */
2935
2936         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2937            asserted, hc_addr);
2938         REG_WR(bp, hc_addr, asserted);
2939
2940         /* now set back the mask */
2941         if (asserted & ATTN_NIG_FOR_FUNC) {
2942                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2943                 bnx2x_release_phy_lock(bp);
2944         }
2945 }
2946
2947 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2948 {
2949         int port = BP_PORT(bp);
2950
2951         /* mark the failure */
2952         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2954         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2955                  bp->link_params.ext_phy_config);
2956
2957         /* log the failure */
2958         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959                " the driver to shut down the card to prevent permanent"
2960                " damage.  Please contact OEM Support for assistance\n");
2961 }
2962
2963 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2964 {
2965         int port = BP_PORT(bp);
2966         int reg_offset;
2967         u32 val, swap_val, swap_override;
2968
2969         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2971
2972         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2973
2974                 val = REG_RD(bp, reg_offset);
2975                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976                 REG_WR(bp, reg_offset, val);
2977
2978                 BNX2X_ERR("SPIO5 hw attention\n");
2979
2980                 /* Fan failure attention */
2981                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2982                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2983                         /* Low power mode is controlled by GPIO 2 */
2984                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2985                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2986                         /* The PHY reset is controlled by GPIO 1 */
2987                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2989                         break;
2990
2991                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2992                         /* The PHY reset is controlled by GPIO 1 */
2993                         /* fake the port number to cancel the swap done in
2994                            set_gpio() */
2995                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2996                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2997                         port = (swap_val && swap_override) ^ 1;
2998                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2999                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3000                         break;
3001
3002                 default:
3003                         break;
3004                 }
3005                 bnx2x_fan_failure(bp);
3006         }
3007
3008         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010                 bnx2x_acquire_phy_lock(bp);
3011                 bnx2x_handle_module_detect_int(&bp->link_params);
3012                 bnx2x_release_phy_lock(bp);
3013         }
3014
3015         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3016
3017                 val = REG_RD(bp, reg_offset);
3018                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019                 REG_WR(bp, reg_offset, val);
3020
3021                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3022                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3023                 bnx2x_panic();
3024         }
3025 }
3026
3027 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3028 {
3029         u32 val;
3030
3031         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3032
3033                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035                 /* DORQ discard attention */
3036                 if (val & 0x2)
3037                         BNX2X_ERR("FATAL error from DORQ\n");
3038         }
3039
3040         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3041
3042                 int port = BP_PORT(bp);
3043                 int reg_offset;
3044
3045                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3047
3048                 val = REG_RD(bp, reg_offset);
3049                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050                 REG_WR(bp, reg_offset, val);
3051
3052                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3053                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3054                 bnx2x_panic();
3055         }
3056 }
3057
3058 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3059 {
3060         u32 val;
3061
3062         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3063
3064                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066                 /* CFC error attention */
3067                 if (val & 0x2)
3068                         BNX2X_ERR("FATAL error from CFC\n");
3069         }
3070
3071         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3072
3073                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075                 /* RQ_USDMDP_FIFO_OVERFLOW */
3076                 if (val & 0x18000)
3077                         BNX2X_ERR("FATAL error from PXP\n");
3078         }
3079
3080         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3081
3082                 int port = BP_PORT(bp);
3083                 int reg_offset;
3084
3085                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3087
3088                 val = REG_RD(bp, reg_offset);
3089                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090                 REG_WR(bp, reg_offset, val);
3091
3092                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3093                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3094                 bnx2x_panic();
3095         }
3096 }
3097
3098 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3099 {
3100         u32 val;
3101
3102         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3103
3104                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105                         int func = BP_FUNC(bp);
3106
3107                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3108                         bp->mf_config = SHMEM_RD(bp,
3109                                            mf_cfg.func_mf_config[func].config);
3110                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3111                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3112                                 bnx2x_dcc_event(bp,
3113                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3114                         bnx2x__link_status_update(bp);
3115                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3116                                 bnx2x_pmf_update(bp);
3117
3118                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3119
3120                         BNX2X_ERR("MC assert!\n");
3121                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3122                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3123                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3124                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3125                         bnx2x_panic();
3126
3127                 } else if (attn & BNX2X_MCP_ASSERT) {
3128
3129                         BNX2X_ERR("MCP assert!\n");
3130                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3131                         bnx2x_fw_dump(bp);
3132
3133                 } else
3134                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3135         }
3136
3137         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3138                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3139                 if (attn & BNX2X_GRC_TIMEOUT) {
3140                         val = CHIP_IS_E1H(bp) ?
3141                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3142                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3143                 }
3144                 if (attn & BNX2X_GRC_RSV) {
3145                         val = CHIP_IS_E1H(bp) ?
3146                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3147                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3148                 }
3149                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3150         }
3151 }
3152
3153 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3154 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3155
3156
3157 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3158 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3159 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3160 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3161 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3162 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
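/* Layout of BNX2X_MISC_GEN_REG as used by the recovery flow below:
 * bits [15:0] hold the global load counter, and bit 16 (the only bit of
 * RESET_DONE_FLAG_MASK the driver actually sets) flags a reset in
 * progress.  Illustrative decode (not driver code):
 *
 *      u32 val       = REG_RD(bp, BNX2X_MISC_GEN_REG);
 *      u32 load_cnt  = val & LOAD_COUNTER_MASK;        bits [15:0]
 *      bool resetting = val & RESET_DONE_FLAG_MASK;    bit 16
 */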
3163 /*
3164  * should be run under rtnl lock
3165  */
3166 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3167 {
3168         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3170         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171         barrier();
3172         mmiowb();
3173 }
3174
3175 /*
3176  * should be run under rtnl lock
3177  */
3178 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3179 {
3180         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181         val |= (1 << RESET_DONE_FLAG_SHIFT);
3182         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3183         barrier();
3184         mmiowb();
3185 }
3186
3187 /*
3188  * should be run under rtnl lock
3189  */
3190 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3191 {
3192         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3193         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3194         return (val & RESET_DONE_FLAG_MASK) ? false : true;
3195 }
3196
3197 /*
3198  * should be run under rtnl lock
3199  */
3200 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3201 {
3202         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203
3204         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3205
3206         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3207         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3208         barrier();
3209         mmiowb();
3210 }
3211
3212 /*
3213  * should be run under rtnl lock
3214  */
3215 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3216 {
3217         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3218
3219         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3220
3221         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3222         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3223         barrier();
3224         mmiowb();
3225
3226         return val1;
3227 }
3228
3229 /*
3230  * should be run under rtnl lock
3231  */
3232 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3233 {
3234         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3235 }
3236
3237 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3238 {
3239         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3241 }
3242
3243 static inline void _print_next_block(int idx, const char *blk)
3244 {
3245         if (idx)
3246                 pr_cont(", ");
3247         pr_cont("%s", blk);
3248 }
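/* Combined with the printk(KERN_ERR ...) header in bnx2x_parity_attn()
 * below, the pr_cont() calls build one comma-separated line, e.g.
 * (illustrative):
 *
 *      eth0: Parity errors detected in blocks: BRB, TSDM, CSEMI
 */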
3249
3250 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3251 {
3252         int i = 0;
3253         u32 cur_bit = 0;
3254         for (i = 0; sig; i++) {
3255                 cur_bit = ((u32)0x1 << i);
3256                 if (sig & cur_bit) {
3257                         switch (cur_bit) {
3258                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3259                                 _print_next_block(par_num++, "BRB");
3260                                 break;
3261                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3262                                 _print_next_block(par_num++, "PARSER");
3263                                 break;
3264                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3265                                 _print_next_block(par_num++, "TSDM");
3266                                 break;
3267                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3268                                 _print_next_block(par_num++, "SEARCHER");
3269                                 break;
3270                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3271                                 _print_next_block(par_num++, "TSEMI");
3272                                 break;
3273                         }
3274
3275                         /* Clear the bit */
3276                         sig &= ~cur_bit;
3277                 }
3278         }
3279
3280         return par_num;
3281 }
3282
3283 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3284 {
3285         int i = 0;
3286         u32 cur_bit = 0;
3287         for (i = 0; sig; i++) {
3288                 cur_bit = ((u32)0x1 << i);
3289                 if (sig & cur_bit) {
3290                         switch (cur_bit) {
3291                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3292                                 _print_next_block(par_num++, "PBCLIENT");
3293                                 break;
3294                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3295                                 _print_next_block(par_num++, "QM");
3296                                 break;
3297                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3298                                 _print_next_block(par_num++, "XSDM");
3299                                 break;
3300                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3301                                 _print_next_block(par_num++, "XSEMI");
3302                                 break;
3303                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3304                                 _print_next_block(par_num++, "DOORBELLQ");
3305                                 break;
3306                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3307                                 _print_next_block(par_num++, "VAUX PCI CORE");
3308                                 break;
3309                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3310                                 _print_next_block(par_num++, "DEBUG");
3311                                 break;
3312                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3313                                 _print_next_block(par_num++, "USDM");
3314                                 break;
3315                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3316                                 _print_next_block(par_num++, "USEMI");
3317                                 break;
3318                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3319                                 _print_next_block(par_num++, "UPB");
3320                                 break;
3321                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3322                                 _print_next_block(par_num++, "CSDM");
3323                                 break;
3324                         }
3325
3326                         /* Clear the bit */
3327                         sig &= ~cur_bit;
3328                 }
3329         }
3330
3331         return par_num;
3332 }
3333
3334 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3335 {
3336         int i = 0;
3337         u32 cur_bit = 0;
3338         for (i = 0; sig; i++) {
3339                 cur_bit = ((u32)0x1 << i);
3340                 if (sig & cur_bit) {
3341                         switch (cur_bit) {
3342                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3343                                 _print_next_block(par_num++, "CSEMI");
3344                                 break;
3345                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3346                                 _print_next_block(par_num++, "PXP");
3347                                 break;
3348                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3349                                 _print_next_block(par_num++,
3350                                         "PXPPCICLOCKCLIENT");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "CFC");
3354                                 break;
3355                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3356                                 _print_next_block(par_num++, "CDU");
3357                                 break;
3358                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3359                                 _print_next_block(par_num++, "IGU");
3360                                 break;
3361                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3362                                 _print_next_block(par_num++, "MISC");
3363                                 break;
3364                         }
3365
3366                         /* Clear the bit */
3367                         sig &= ~cur_bit;
3368                 }
3369         }
3370
3371         return par_num;
3372 }
3373
3374 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3375 {
3376         int i = 0;
3377         u32 cur_bit = 0;
3378         for (i = 0; sig; i++) {
3379                 cur_bit = ((u32)0x1 << i);
3380                 if (sig & cur_bit) {
3381                         switch (cur_bit) {
3382                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3383                                 _print_next_block(par_num++, "MCP ROM");
3384                                 break;
3385                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3386                                 _print_next_block(par_num++, "MCP UMP RX");
3387                                 break;
3388                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3389                                 _print_next_block(par_num++, "MCP UMP TX");
3390                                 break;
3391                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3392                                 _print_next_block(par_num++, "MCP SCPAD");
3393                                 break;
3394                         }
3395
3396                         /* Clear the bit */
3397                         sig &= ~cur_bit;
3398                 }
3399         }
3400
3401         return par_num;
3402 }
3403
3404 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3405                                      u32 sig2, u32 sig3)
3406 {
3407         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3408             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3409                 int par_num = 0;
3410                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3411                         "[0]:0x%08x [1]:0x%08x "
3412                         "[2]:0x%08x [3]:0x%08x\n",
3413                           sig0 & HW_PRTY_ASSERT_SET_0,
3414                           sig1 & HW_PRTY_ASSERT_SET_1,
3415                           sig2 & HW_PRTY_ASSERT_SET_2,
3416                           sig3 & HW_PRTY_ASSERT_SET_3);
3417                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3418                        bp->dev->name);
3419                 par_num = bnx2x_print_blocks_with_parity0(
3420                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3421                 par_num = bnx2x_print_blocks_with_parity1(
3422                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3423                 par_num = bnx2x_print_blocks_with_parity2(
3424                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3425                 par_num = bnx2x_print_blocks_with_parity3(
3426                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3427                 pr_cont("\n");
3428                 return true;
3429         } else
3430                 return false;
3431 }
3432
3433 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3434 {
3435         struct attn_route attn;
3436         int port = BP_PORT(bp);
3437
3438         attn.sig[0] = REG_RD(bp,
3439                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3440                              port*4);
3441         attn.sig[1] = REG_RD(bp,
3442                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3443                              port*4);
3444         attn.sig[2] = REG_RD(bp,
3445                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3446                              port*4);
3447         attn.sig[3] = REG_RD(bp,
3448                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3449                              port*4);
3450
3451         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3452                                         attn.sig[3]);
3453 }
3454
3455 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3456 {
3457         struct attn_route attn, *group_mask;
3458         int port = BP_PORT(bp);
3459         int index;
3460         u32 reg_addr;
3461         u32 val;
3462         u32 aeu_mask;
3463
3464         /* need to take HW lock because MCP or other port might also
3465            try to handle this event */
3466         bnx2x_acquire_alr(bp);
3467
3468         if (bnx2x_chk_parity_attn(bp)) {
3469                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3470                 bnx2x_set_reset_in_progress(bp);
3471                 schedule_delayed_work(&bp->reset_task, 0);
3472                 /* Disable HW interrupts */
3473                 bnx2x_int_disable(bp);
3474                 bnx2x_release_alr(bp);
3475                 /* In case of parity errors don't handle attentions, so
3476                  * that the other function can also "see" the parity
3477                  * errors.
3478                  */
3478                 return;
3479         }
3480
3481         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3482         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3483         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3484         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3485         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3486            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3487
3488         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3489                 if (deasserted & (1 << index)) {
3490                         group_mask = &bp->attn_group[index];
3491
3492                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3493                            index, group_mask->sig[0], group_mask->sig[1],
3494                            group_mask->sig[2], group_mask->sig[3]);
3495
3496                         bnx2x_attn_int_deasserted3(bp,
3497                                         attn.sig[3] & group_mask->sig[3]);
3498                         bnx2x_attn_int_deasserted1(bp,
3499                                         attn.sig[1] & group_mask->sig[1]);
3500                         bnx2x_attn_int_deasserted2(bp,
3501                                         attn.sig[2] & group_mask->sig[2]);
3502                         bnx2x_attn_int_deasserted0(bp,
3503                                         attn.sig[0] & group_mask->sig[0]);
3504                 }
3505         }
3506
3507         bnx2x_release_alr(bp);
3508
3509         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3510
3511         val = ~deasserted;
3512         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3513            val, reg_addr);
3514         REG_WR(bp, reg_addr, val);
3515
3516         if (~bp->attn_state & deasserted)
3517                 BNX2X_ERR("IGU ERROR\n");
3518
3519         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3520                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3521
3522         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523         aeu_mask = REG_RD(bp, reg_addr);
3524
3525         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3526            aeu_mask, deasserted);
3527         aeu_mask |= (deasserted & 0x3ff);
3528         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3529
3530         REG_WR(bp, reg_addr, aeu_mask);
3531         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3532
3533         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3534         bp->attn_state &= ~deasserted;
3535         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3536 }
3537
3538 static void bnx2x_attn_int(struct bnx2x *bp)
3539 {
3540         /* read local copy of bits */
3541         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3542                                                                 attn_bits);
3543         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3544                                                                 attn_bits_ack);
3545         u32 attn_state = bp->attn_state;
3546
3547         /* look for changed bits */
3548         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3549         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3550
3551         DP(NETIF_MSG_HW,
3552            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3553            attn_bits, attn_ack, asserted, deasserted);
3554
3555         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3556                 BNX2X_ERR("BAD attention state\n");
3557
3558         /* handle bits that were raised */
3559         if (asserted)
3560                 bnx2x_attn_int_asserted(bp, asserted);
3561
3562         if (deasserted)
3563                 bnx2x_attn_int_deasserted(bp, deasserted);
3564 }
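/* Illustrative decode (not driver code) of the bit logic above, per
 * attention bit:
 *
 *      attn_bits  attn_ack  attn_state      meaning
 *          1          0          0          newly asserted
 *          0          1          1          newly deasserted
 *          1          1          1          steady asserted (no work)
 *          0          0          0          steady deasserted (no work)
 *
 * Combinations where attn_bits == attn_ack but both disagree with
 * attn_state trip the "BAD attention state" check.
 */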
3565
3566 static void bnx2x_sp_task(struct work_struct *work)
3567 {
3568         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3569         u16 status;
3570
3571         /* Return here if interrupt is disabled */
3572         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3573                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3574                 return;
3575         }
3576
3577         status = bnx2x_update_dsb_idx(bp);
3578 /*      if (status == 0)                                     */
3579 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3580
3581         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3582
3583         /* HW attentions */
3584         if (status & 0x1) {
3585                 bnx2x_attn_int(bp);
3586                 status &= ~0x1;
3587         }
3588
3589         /* CStorm events: STAT_QUERY */
3590         if (status & 0x2) {
3591                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3592                 status &= ~0x2;
3593         }
3594
3595         if (unlikely(status))
3596                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3597                    status);
3598
3599         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3600                      IGU_INT_NOP, 1);
3601         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3602                      IGU_INT_NOP, 1);
3603         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3604                      IGU_INT_NOP, 1);
3605         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3606                      IGU_INT_NOP, 1);
3607         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3608                      IGU_INT_ENABLE, 1);
3609 }
3610
3611 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3612 {
3613         struct net_device *dev = dev_instance;
3614         struct bnx2x *bp = netdev_priv(dev);
3615
3616         /* Return here if interrupt is disabled */
3617         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3618                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3619                 return IRQ_HANDLED;
3620         }
3621
3622         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3623
3624 #ifdef BNX2X_STOP_ON_ERROR
3625         if (unlikely(bp->panic))
3626                 return IRQ_HANDLED;
3627 #endif
3628
3629 #ifdef BCM_CNIC
3630         {
3631                 struct cnic_ops *c_ops;
3632
3633                 rcu_read_lock();
3634                 c_ops = rcu_dereference(bp->cnic_ops);
3635                 if (c_ops)
3636                         c_ops->cnic_handler(bp->cnic_data, NULL);
3637                 rcu_read_unlock();
3638         }
3639 #endif
3640         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3641
3642         return IRQ_HANDLED;
3643 }
3644
3645 /* end of slow path */
3646
3647 /* Statistics */
3648
3649 /****************************************************************************
3650 * Macros
3651 ****************************************************************************/
3652
3653 /* sum[hi:lo] += add[hi:lo] */
3654 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3655         do { \
3656                 s_lo += a_lo; \
3657                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3658         } while (0)
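/* Worked example (illustrative only): adding 1 to 0x00000000ffffffff
 * wraps the low word and carries into the high word:
 *
 *      u32 hi = 0x0, lo = 0xffffffff;
 *      ADD_64(hi, 0, lo, 1);           now hi == 0x1, lo == 0x0
 */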
3659
3660 /* difference = minuend - subtrahend */
3661 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3662         do { \
3663                 if (m_lo < s_lo) { \
3664                         /* underflow */ \
3665                         d_hi = m_hi - s_hi; \
3666                         if (d_hi > 0) { \
3667                                 /* we can borrow 1 */ \
3668                                 d_hi--; \
3669                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3670                         } else { \
3671                                 /* m_hi <= s_hi */ \
3672                                 d_hi = 0; \
3673                                 d_lo = 0; \
3674                         } \
3675                 } else { \
3676                         /* m_lo >= s_lo */ \
3677                         if (m_hi < s_hi) { \
3678                                 d_hi = 0; \
3679                                 d_lo = 0; \
3680                         } else { \
3681                                 /* m_hi >= s_hi */ \
3682                                 d_hi = m_hi - s_hi; \
3683                                 d_lo = m_lo - s_lo; \
3684                         } \
3685                 } \
3686         } while (0)
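/* Worked example (illustrative only): 0x0000000100000000 minus
 * 0x0000000000000001 borrows from the high word:
 *
 *      u32 d_hi, d_lo;
 *      DIFF_64(d_hi, 0x1, 0x0, d_lo, 0x0, 0x1);
 *      now d_hi == 0x0, d_lo == 0xffffffff
 *
 * If the subtrahend exceeds the minuend, the result is clamped to
 * zero instead of wrapping.
 */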
3687
3688 #define UPDATE_STAT64(s, t) \
3689         do { \
3690                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3691                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3692                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3693                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3694                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3695                        pstats->mac_stx[1].t##_lo, diff.lo); \
3696         } while (0)
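/* UPDATE_STAT64 keeps two copies of each counter: mac_stx[0] mirrors
 * the raw value from the last HW read (so the next delta survives a
 * HW counter wrap), while mac_stx[1] accumulates those deltas into a
 * monotonically growing 64-bit total.
 */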
3697
3698 #define UPDATE_STAT64_NIG(s, t) \
3699         do { \
3700                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3701                         diff.lo, new->s##_lo, old->s##_lo); \
3702                 ADD_64(estats->t##_hi, diff.hi, \
3703                        estats->t##_lo, diff.lo); \
3704         } while (0)
3705
3706 /* sum[hi:lo] += add */
3707 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3708         do { \
3709                 s_lo += a; \
3710                 s_hi += (s_lo < a) ? 1 : 0; \
3711         } while (0)
3712
3713 #define UPDATE_EXTEND_STAT(s) \
3714         do { \
3715                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3716                               pstats->mac_stx[1].s##_lo, \
3717                               new->s); \
3718         } while (0)
3719
3720 #define UPDATE_EXTEND_TSTAT(s, t) \
3721         do { \
3722                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3723                 old_tclient->s = tclient->s; \
3724                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3725         } while (0)
3726
3727 #define UPDATE_EXTEND_USTAT(s, t) \
3728         do { \
3729                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3730                 old_uclient->s = uclient->s; \
3731                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_XSTAT(s, t) \
3735         do { \
3736                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3737                 old_xclient->s = xclient->s; \
3738                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739         } while (0)
3740
3741 /* minuend -= subtrahend */
3742 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3743         do { \
3744                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3745         } while (0)
3746
3747 /* minuend[hi:lo] -= subtrahend */
3748 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3749         do { \
3750                 SUB_64(m_hi, 0, m_lo, s); \
3751         } while (0)
3752
3753 #define SUB_EXTEND_USTAT(s, t) \
3754         do { \
3755                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3756                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3757         } while (0)
3758
3759 /*
3760  * General service functions
3761  */
3762
3763 static inline long bnx2x_hilo(u32 *hiref)
3764 {
3765         u32 lo = *(hiref + 1);
3766 #if (BITS_PER_LONG == 64)
3767         u32 hi = *hiref;
3768
3769         return HILO_U64(hi, lo);
3770 #else
3771         return lo;
3772 #endif
3773 }
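/* Statistics live in memory as {hi, lo} pairs of u32, 'hi' word first;
 * on 64-bit kernels bnx2x_hilo() folds the pair into a single value
 * via HILO_U64(), while 32-bit kernels report only the low word.
 */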
3774
3775 /*
3776  * Init service functions
3777  */
3778
3779 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3780 {
3781         if (!bp->stats_pending) {
3782                 struct eth_query_ramrod_data ramrod_data = {0};
3783                 int i, rc;
3784
3785                 ramrod_data.drv_counter = bp->stats_counter++;
3786                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3787                 for_each_queue(bp, i)
3788                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3789
3790                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3791                                    ((u32 *)&ramrod_data)[1],
3792                                    ((u32 *)&ramrod_data)[0], 0);
3793                 if (rc == 0) {
3794                         /* stats ramrod has its own slot on the spq */
3795                         bp->spq_left++;
3796                         bp->stats_pending = 1;
3797                 }
3798         }
3799 }
3800
3801 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3802 {
3803         struct dmae_command *dmae = &bp->stats_dmae;
3804         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3805
3806         *stats_comp = DMAE_COMP_VAL;
3807         if (CHIP_REV_IS_SLOW(bp))
3808                 return;
3809
3810         /* loader */
3811         if (bp->executer_idx) {
3812                 int loader_idx = PMF_DMAE_C(bp);
3813
3814                 memset(dmae, 0, sizeof(struct dmae_command));
3815
3816                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3817                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3818                                 DMAE_CMD_DST_RESET |
3819 #ifdef __BIG_ENDIAN
3820                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3821 #else
3822                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3823 #endif
3824                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3825                                                DMAE_CMD_PORT_0) |
3826                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3827                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3828                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3829                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3830                                      sizeof(struct dmae_command) *
3831                                      (loader_idx + 1)) >> 2;
3832                 dmae->dst_addr_hi = 0;
3833                 dmae->len = sizeof(struct dmae_command) >> 2;
3834                 if (CHIP_IS_E1(bp))
3835                         dmae->len--;
3836                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3837                 dmae->comp_addr_hi = 0;
3838                 dmae->comp_val = 1;
3839
3840                 *stats_comp = 0;
3841                 bnx2x_post_dmae(bp, dmae, loader_idx);
3842
3843         } else if (bp->func_stx) {
3844                 *stats_comp = 0;
3845                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3846         }
3847 }
3848
3849 static int bnx2x_stats_comp(struct bnx2x *bp)
3850 {
3851         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3852         int cnt = 10;
3853
3854         might_sleep();
3855         while (*stats_comp != DMAE_COMP_VAL) {
3856                 if (!cnt) {
3857                         BNX2X_ERR("timed out waiting for stats to finish\n");
3858                         break;
3859                 }
3860                 cnt--;
3861                 msleep(1);
3862         }
3863         return 1;
3864 }
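/* Polls for up to ~10 ms (10 iterations of msleep(1)) for the DMAE
 * engine to write DMAE_COMP_VAL into the completion word; note that
 * it returns 1 whether or not the wait timed out.
 */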
3865
3866 /*
3867  * Statistics service functions
3868  */
3869
3870 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3871 {
3872         struct dmae_command *dmae;
3873         u32 opcode;
3874         int loader_idx = PMF_DMAE_C(bp);
3875         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3876
3877         /* sanity */
3878         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3879                 BNX2X_ERR("BUG!\n");
3880                 return;
3881         }
3882
3883         bp->executer_idx = 0;
3884
3885         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3886                   DMAE_CMD_C_ENABLE |
3887                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3888 #ifdef __BIG_ENDIAN
3889                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3890 #else
3891                   DMAE_CMD_ENDIANITY_DW_SWAP |
3892 #endif
3893                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3894                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3895
3896         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3897         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3898         dmae->src_addr_lo = bp->port.port_stx >> 2;
3899         dmae->src_addr_hi = 0;
3900         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3901         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3902         dmae->len = DMAE_LEN32_RD_MAX;
3903         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3904         dmae->comp_addr_hi = 0;
3905         dmae->comp_val = 1;
3906
3907         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3909         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3910         dmae->src_addr_hi = 0;
3911         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3912                                    DMAE_LEN32_RD_MAX * 4);
3913         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3914                                    DMAE_LEN32_RD_MAX * 4);
3915         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3916         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3917         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3918         dmae->comp_val = DMAE_COMP_VAL;
3919
3920         *stats_comp = 0;
3921         bnx2x_hw_stats_post(bp);
3922         bnx2x_stats_comp(bp);
3923 }
3924
3925 static void bnx2x_port_stats_init(struct bnx2x *bp)
3926 {
3927         struct dmae_command *dmae;
3928         int port = BP_PORT(bp);
3929         int vn = BP_E1HVN(bp);
3930         u32 opcode;
3931         int loader_idx = PMF_DMAE_C(bp);
3932         u32 mac_addr;
3933         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3934
3935         /* sanity */
3936         if (!bp->link_vars.link_up || !bp->port.pmf) {
3937                 BNX2X_ERR("BUG!\n");
3938                 return;
3939         }
3940
3941         bp->executer_idx = 0;
3942
3943         /* MCP */
3944         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3945                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3946                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3947 #ifdef __BIG_ENDIAN
3948                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3949 #else
3950                   DMAE_CMD_ENDIANITY_DW_SWAP |
3951 #endif
3952                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3953                   (vn << DMAE_CMD_E1HVN_SHIFT));
3954
3955         if (bp->port.port_stx) {
3956
3957                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3958                 dmae->opcode = opcode;
3959                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3960                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3961                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3962                 dmae->dst_addr_hi = 0;
3963                 dmae->len = sizeof(struct host_port_stats) >> 2;
3964                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3965                 dmae->comp_addr_hi = 0;
3966                 dmae->comp_val = 1;
3967         }
3968
3969         if (bp->func_stx) {
3970
3971                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3972                 dmae->opcode = opcode;
3973                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3974                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3975                 dmae->dst_addr_lo = bp->func_stx >> 2;
3976                 dmae->dst_addr_hi = 0;
3977                 dmae->len = sizeof(struct host_func_stats) >> 2;
3978                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3979                 dmae->comp_addr_hi = 0;
3980                 dmae->comp_val = 1;
3981         }
3982
3983         /* MAC */
3984         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3985                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3986                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3987 #ifdef __BIG_ENDIAN
3988                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3989 #else
3990                   DMAE_CMD_ENDIANITY_DW_SWAP |
3991 #endif
3992                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3993                   (vn << DMAE_CMD_E1HVN_SHIFT));
3994
3995         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3996
3997                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3998                                    NIG_REG_INGRESS_BMAC0_MEM);
3999
4000                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4001                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4002                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4003                 dmae->opcode = opcode;
4004                 dmae->src_addr_lo = (mac_addr +
4005                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4006                 dmae->src_addr_hi = 0;
4007                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4008                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4009                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4010                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4011                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4012                 dmae->comp_addr_hi = 0;
4013                 dmae->comp_val = 1;
4014
4015                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4016                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4017                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4018                 dmae->opcode = opcode;
4019                 dmae->src_addr_lo = (mac_addr +
4020                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4021                 dmae->src_addr_hi = 0;
4022                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4023                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4024                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4025                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4026                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4027                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4029                 dmae->comp_addr_hi = 0;
4030                 dmae->comp_val = 1;
4031
4032         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4033
4034                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4035
4036                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4037                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4038                 dmae->opcode = opcode;
4039                 dmae->src_addr_lo = (mac_addr +
4040                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4041                 dmae->src_addr_hi = 0;
4042                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4043                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4044                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4045                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4046                 dmae->comp_addr_hi = 0;
4047                 dmae->comp_val = 1;
4048
4049                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4050                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4051                 dmae->opcode = opcode;
4052                 dmae->src_addr_lo = (mac_addr +
4053                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4054                 dmae->src_addr_hi = 0;
4055                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4056                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4057                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4058                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4059                 dmae->len = 1;
4060                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4061                 dmae->comp_addr_hi = 0;
4062                 dmae->comp_val = 1;
4063
4064                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4065                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066                 dmae->opcode = opcode;
4067                 dmae->src_addr_lo = (mac_addr +
4068                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4069                 dmae->src_addr_hi = 0;
4070                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4071                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4072                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4073                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4074                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4075                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076                 dmae->comp_addr_hi = 0;
4077                 dmae->comp_val = 1;
4078         }
4079
4080         /* NIG */
4081         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4082         dmae->opcode = opcode;
4083         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4084                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4085         dmae->src_addr_hi = 0;
4086         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4087         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4088         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4089         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4090         dmae->comp_addr_hi = 0;
4091         dmae->comp_val = 1;
4092
4093         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094         dmae->opcode = opcode;
4095         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4096                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4097         dmae->src_addr_hi = 0;
4098         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4099                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4100         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4101                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102         dmae->len = (2*sizeof(u32)) >> 2;
4103         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4104         dmae->comp_addr_hi = 0;
4105         dmae->comp_val = 1;
4106
4107         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4108         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4109                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4110                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4111 #ifdef __BIG_ENDIAN
4112                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4113 #else
4114                         DMAE_CMD_ENDIANITY_DW_SWAP |
4115 #endif
4116                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4117                         (vn << DMAE_CMD_E1HVN_SHIFT));
4118         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4119                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4120         dmae->src_addr_hi = 0;
4121         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4122                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4123         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4124                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125         dmae->len = (2*sizeof(u32)) >> 2;
4126         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4127         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4128         dmae->comp_val = DMAE_COMP_VAL;
4129
4130         *stats_comp = 0;
4131 }
4132
4133 static void bnx2x_func_stats_init(struct bnx2x *bp)
4134 {
4135         struct dmae_command *dmae = &bp->stats_dmae;
4136         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4137
4138         /* sanity */
4139         if (!bp->func_stx) {
4140                 BNX2X_ERR("BUG!\n");
4141                 return;
4142         }
4143
4144         bp->executer_idx = 0;
4145         memset(dmae, 0, sizeof(struct dmae_command));
4146
4147         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4148                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4149                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4150 #ifdef __BIG_ENDIAN
4151                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4152 #else
4153                         DMAE_CMD_ENDIANITY_DW_SWAP |
4154 #endif
4155                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4156                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4157         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4158         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4159         dmae->dst_addr_lo = bp->func_stx >> 2;
4160         dmae->dst_addr_hi = 0;
4161         dmae->len = sizeof(struct host_func_stats) >> 2;
4162         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4163         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4164         dmae->comp_val = DMAE_COMP_VAL;
4165
4166         *stats_comp = 0;
4167 }
4168
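/* (Re)arm statistics collection: the port management function (PMF)
 * programs the full set of port DMAE commands, a non-PMF with a valid
 * func_stx programs only the function-stats copy, and in either case
 * both the hardware DMAE chain and the storm firmware query are posted.
 */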
4169 static void bnx2x_stats_start(struct bnx2x *bp)
4170 {
4171         if (bp->port.pmf)
4172                 bnx2x_port_stats_init(bp);
4173
4174         else if (bp->func_stx)
4175                 bnx2x_func_stats_init(bp);
4176
4177         bnx2x_hw_stats_post(bp);
4178         bnx2x_storm_stats_post(bp);
4179 }
4180
4181 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4182 {
4183         bnx2x_stats_comp(bp);
4184         bnx2x_stats_pmf_update(bp);
4185         bnx2x_stats_start(bp);
4186 }
4187
4188 static void bnx2x_stats_restart(struct bnx2x *bp)
4189 {
4190         bnx2x_stats_comp(bp);
4191         bnx2x_stats_start(bp);
4192 }
4193
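/* Fold the freshly DMAE'd BigMAC counters (mac_stats.bmac_stats) into
 * the host_port_stats mirror; UPDATE_STAT64 extends each hardware
 * hi/lo counter pair into a running 64-bit value, and the pause-frame
 * totals are then exported to eth_stats.
 */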
4194 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4195 {
4196         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4197         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4198         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4199         struct {
4200                 u32 lo;
4201                 u32 hi;
4202         } diff;
4203
4204         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4205         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4206         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4207         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4208         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4209         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4210         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4211         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4212         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4213         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4214         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4215         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4216         UPDATE_STAT64(tx_stat_gt127,
4217                                 tx_stat_etherstatspkts65octetsto127octets);
4218         UPDATE_STAT64(tx_stat_gt255,
4219                                 tx_stat_etherstatspkts128octetsto255octets);
4220         UPDATE_STAT64(tx_stat_gt511,
4221                                 tx_stat_etherstatspkts256octetsto511octets);
4222         UPDATE_STAT64(tx_stat_gt1023,
4223                                 tx_stat_etherstatspkts512octetsto1023octets);
4224         UPDATE_STAT64(tx_stat_gt1518,
4225                                 tx_stat_etherstatspkts1024octetsto1522octets);
4226         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4227         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4228         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4229         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4230         UPDATE_STAT64(tx_stat_gterr,
4231                                 tx_stat_dot3statsinternalmactransmiterrors);
4232         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4233
4234         estats->pause_frames_received_hi =
4235                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4236         estats->pause_frames_received_lo =
4237                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4238
4239         estats->pause_frames_sent_hi =
4240                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4241         estats->pause_frames_sent_lo =
4242                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4243 }
4244
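/* EMAC flavour of the above: UPDATE_EXTEND_STAT widens each 32-bit
 * hardware counter into the 64-bit mirror, and the pause totals are
 * the sum of the XON and XOFF frame counters in each direction.
 */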
4245 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4246 {
4247         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4248         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4249         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4250
4251         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4252         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4253         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4254         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4255         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4256         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4257         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4258         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4259         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4260         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4261         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4262         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4263         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4264         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4265         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4266         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4267         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4268         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4269         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4270         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4271         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4272         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4273         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4274         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4275         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4276         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4277         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4278         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4279         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4280         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4281         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4282
4283         estats->pause_frames_received_hi =
4284                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4285         estats->pause_frames_received_lo =
4286                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4287         ADD_64(estats->pause_frames_received_hi,
4288                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4289                estats->pause_frames_received_lo,
4290                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4291
4292         estats->pause_frames_sent_hi =
4293                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4294         estats->pause_frames_sent_lo =
4295                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4296         ADD_64(estats->pause_frames_sent_hi,
4297                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4298                estats->pause_frames_sent_lo,
4299                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4300 }
4301
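/* Consume the results of the hardware (DMAE) statistics query:
 * dispatch to the B/EMAC-specific update by active MAC type, fold the
 * NIG BRB discard/truncate deltas into 64-bit accumulators, snapshot
 * the NIG block as the baseline for the next delta, copy the MAC
 * mirror into eth_stats, and complain if the MCP reports a new NIG
 * timer max.
 */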
4302 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4303 {
4304         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4305         struct nig_stats *old = &(bp->port.old_nig_stats);
4306         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4307         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4308         struct {
4309                 u32 lo;
4310                 u32 hi;
4311         } diff;
4312
4313         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4314                 bnx2x_bmac_stats_update(bp);
4315
4316         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4317                 bnx2x_emac_stats_update(bp);
4318
4319         else { /* should never be reached: no active MAC */

4320                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4321                 return -1;
4322         }
4323
4324         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4325                       new->brb_discard - old->brb_discard);
4326         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4327                       new->brb_truncate - old->brb_truncate);
4328
4329         UPDATE_STAT64_NIG(egress_mac_pkt0,
4330                                         etherstatspkts1024octetsto1522octets);
4331         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4332
4333         memcpy(old, new, sizeof(struct nig_stats));
4334
4335         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4336                sizeof(struct mac_stx));
4337         estats->brb_drop_hi = pstats->brb_drop_hi;
4338         estats->brb_drop_lo = pstats->brb_drop_lo;
4339
4340         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4341
4342         if (!BP_NOMCP(bp)) {
4343                 u32 nig_timer_max =
4344                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4345                 if (nig_timer_max != estats->nig_timer_max) {
4346                         estats->nig_timer_max = nig_timer_max;
4347                         BNX2X_ERR("NIG timer max (%u)\n",
4348                                   estats->nig_timer_max);
4349                 }
4350         }
4351
4352         return 0;
4353 }
4354
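/* Consume the per-client statistics delivered by the storm firmware.
 * A storm's block is accepted only if its stamped counter is exactly
 * one behind the driver's stats_counter; a stale block aborts the
 * whole update (non-zero return) so it can be retried on the next
 * tick.  Valid t/u/x-storm client counters are then folded into
 * per-queue (qstats), per-function (fstats) and global (estats) totals.
 */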
4355 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4356 {
4357         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4358         struct tstorm_per_port_stats *tport =
4359                                         &stats->tstorm_common.port_statistics;
4360         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4361         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4362         int i;
4363
4364         memcpy(&(fstats->total_bytes_received_hi),
4365                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4366                sizeof(struct host_func_stats) - 2*sizeof(u32));
4367         estats->error_bytes_received_hi = 0;
4368         estats->error_bytes_received_lo = 0;
4369         estats->etherstatsoverrsizepkts_hi = 0;
4370         estats->etherstatsoverrsizepkts_lo = 0;
4371         estats->no_buff_discard_hi = 0;
4372         estats->no_buff_discard_lo = 0;
4373
4374         for_each_queue(bp, i) {
4375                 struct bnx2x_fastpath *fp = &bp->fp[i];
4376                 int cl_id = fp->cl_id;
4377                 struct tstorm_per_client_stats *tclient =
4378                                 &stats->tstorm_common.client_statistics[cl_id];
4379                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4380                 struct ustorm_per_client_stats *uclient =
4381                                 &stats->ustorm_common.client_statistics[cl_id];
4382                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4383                 struct xstorm_per_client_stats *xclient =
4384                                 &stats->xstorm_common.client_statistics[cl_id];
4385                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4386                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4387                 u32 diff;
4388
4389                 /* are storm stats valid? */
4390                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4391                                                         bp->stats_counter) {
4392                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4393                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4394                            i, xclient->stats_counter, bp->stats_counter);
4395                         return -1;
4396                 }
4397                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4398                                                         bp->stats_counter) {
4399                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4400                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4401                            i, tclient->stats_counter, bp->stats_counter);
4402                         return -2;
4403                 }
4404                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4405                                                         bp->stats_counter) {
4406                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4407                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4408                            i, uclient->stats_counter, bp->stats_counter);
4409                         return -4;
4410                 }
4411
4412                 qstats->total_bytes_received_hi =
4413                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4414                 qstats->total_bytes_received_lo =
4415                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4416
4417                 ADD_64(qstats->total_bytes_received_hi,
4418                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4419                        qstats->total_bytes_received_lo,
4420                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4421
4422                 ADD_64(qstats->total_bytes_received_hi,
4423                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4424                        qstats->total_bytes_received_lo,
4425                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4426
4427                 SUB_64(qstats->total_bytes_received_hi,
4428                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4429                        qstats->total_bytes_received_lo,
4430                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4431
4432                 SUB_64(qstats->total_bytes_received_hi,
4433                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4434                        qstats->total_bytes_received_lo,
4435                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4436
4437                 SUB_64(qstats->total_bytes_received_hi,
4438                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4439                        qstats->total_bytes_received_lo,
4440                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4441
4442                 qstats->valid_bytes_received_hi =
4443                                         qstats->total_bytes_received_hi;
4444                 qstats->valid_bytes_received_lo =
4445                                         qstats->total_bytes_received_lo;
4446
4447                 qstats->error_bytes_received_hi =
4448                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4449                 qstats->error_bytes_received_lo =
4450                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4451
4452                 ADD_64(qstats->total_bytes_received_hi,
4453                        qstats->error_bytes_received_hi,
4454                        qstats->total_bytes_received_lo,
4455                        qstats->error_bytes_received_lo);
4456
4457                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4458                                         total_unicast_packets_received);
4459                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4460                                         total_multicast_packets_received);
4461                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4462                                         total_broadcast_packets_received);
4463                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4464                                         etherstatsoverrsizepkts);
4465                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4466
4467                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4468                                         total_unicast_packets_received);
4469                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4470                                         total_multicast_packets_received);
4471                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4472                                         total_broadcast_packets_received);
4473                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4474                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4475                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4476
4477                 qstats->total_bytes_transmitted_hi =
4478                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4479                 qstats->total_bytes_transmitted_lo =
4480                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4481
4482                 ADD_64(qstats->total_bytes_transmitted_hi,
4483                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4484                        qstats->total_bytes_transmitted_lo,
4485                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4486
4487                 ADD_64(qstats->total_bytes_transmitted_hi,
4488                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4489                        qstats->total_bytes_transmitted_lo,
4490                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4491
4492                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4493                                         total_unicast_packets_transmitted);
4494                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4495                                         total_multicast_packets_transmitted);
4496                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4497                                         total_broadcast_packets_transmitted);
4498
4499                 old_tclient->checksum_discard = tclient->checksum_discard;
4500                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4501
4502                 ADD_64(fstats->total_bytes_received_hi,
4503                        qstats->total_bytes_received_hi,
4504                        fstats->total_bytes_received_lo,
4505                        qstats->total_bytes_received_lo);
4506                 ADD_64(fstats->total_bytes_transmitted_hi,
4507                        qstats->total_bytes_transmitted_hi,
4508                        fstats->total_bytes_transmitted_lo,
4509                        qstats->total_bytes_transmitted_lo);
4510                 ADD_64(fstats->total_unicast_packets_received_hi,
4511                        qstats->total_unicast_packets_received_hi,
4512                        fstats->total_unicast_packets_received_lo,
4513                        qstats->total_unicast_packets_received_lo);
4514                 ADD_64(fstats->total_multicast_packets_received_hi,
4515                        qstats->total_multicast_packets_received_hi,
4516                        fstats->total_multicast_packets_received_lo,
4517                        qstats->total_multicast_packets_received_lo);
4518                 ADD_64(fstats->total_broadcast_packets_received_hi,
4519                        qstats->total_broadcast_packets_received_hi,
4520                        fstats->total_broadcast_packets_received_lo,
4521                        qstats->total_broadcast_packets_received_lo);
4522                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4523                        qstats->total_unicast_packets_transmitted_hi,
4524                        fstats->total_unicast_packets_transmitted_lo,
4525                        qstats->total_unicast_packets_transmitted_lo);
4526                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4527                        qstats->total_multicast_packets_transmitted_hi,
4528                        fstats->total_multicast_packets_transmitted_lo,
4529                        qstats->total_multicast_packets_transmitted_lo);
4530                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4531                        qstats->total_broadcast_packets_transmitted_hi,
4532                        fstats->total_broadcast_packets_transmitted_lo,
4533                        qstats->total_broadcast_packets_transmitted_lo);
4534                 ADD_64(fstats->valid_bytes_received_hi,
4535                        qstats->valid_bytes_received_hi,
4536                        fstats->valid_bytes_received_lo,
4537                        qstats->valid_bytes_received_lo);
4538
4539                 ADD_64(estats->error_bytes_received_hi,
4540                        qstats->error_bytes_received_hi,
4541                        estats->error_bytes_received_lo,
4542                        qstats->error_bytes_received_lo);
4543                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4544                        qstats->etherstatsoverrsizepkts_hi,
4545                        estats->etherstatsoverrsizepkts_lo,
4546                        qstats->etherstatsoverrsizepkts_lo);
4547                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4548                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4549         }
4550
4551         ADD_64(fstats->total_bytes_received_hi,
4552                estats->rx_stat_ifhcinbadoctets_hi,
4553                fstats->total_bytes_received_lo,
4554                estats->rx_stat_ifhcinbadoctets_lo);
4555
4556         memcpy(estats, &(fstats->total_bytes_received_hi),
4557                sizeof(struct host_func_stats) - 2*sizeof(u32));
4558
4559         ADD_64(estats->etherstatsoverrsizepkts_hi,
4560                estats->rx_stat_dot3statsframestoolong_hi,
4561                estats->etherstatsoverrsizepkts_lo,
4562                estats->rx_stat_dot3statsframestoolong_lo);
4563         ADD_64(estats->error_bytes_received_hi,
4564                estats->rx_stat_ifhcinbadoctets_hi,
4565                estats->error_bytes_received_lo,
4566                estats->rx_stat_ifhcinbadoctets_lo);
4567
4568         if (bp->port.pmf) {
4569                 estats->mac_filter_discard =
4570                                 le32_to_cpu(tport->mac_filter_discard);
4571                 estats->xxoverflow_discard =
4572                                 le32_to_cpu(tport->xxoverflow_discard);
4573                 estats->brb_truncate_discard =
4574                                 le32_to_cpu(tport->brb_truncate_discard);
4575                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4576         }
4577
4578         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4579
4580         bp->stats_pending = 0;
4581
4582         return 0;
4583 }
4584
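/* Translate the accumulated eth_stats into the generic
 * struct net_device_stats reported to the stack, collapsing each
 * 64-bit hi/lo pair with bnx2x_hilo().
 */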
4585 static void bnx2x_net_stats_update(struct bnx2x *bp)
4586 {
4587         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4588         struct net_device_stats *nstats = &bp->dev->stats;
4589         int i;
4590
4591         nstats->rx_packets =
4592                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4593                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4594                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4595
4596         nstats->tx_packets =
4597                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4598                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4599                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4600
4601         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4602
4603         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4604
4605         nstats->rx_dropped = estats->mac_discard;
4606         for_each_queue(bp, i)
4607                 nstats->rx_dropped +=
4608                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4609
4610         nstats->tx_dropped = 0;
4611
4612         nstats->multicast =
4613                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4614
4615         nstats->collisions =
4616                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4617
4618         nstats->rx_length_errors =
4619                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4620                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4621         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4622                                  bnx2x_hilo(&estats->brb_truncate_hi);
4623         nstats->rx_crc_errors =
4624                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4625         nstats->rx_frame_errors =
4626                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4627         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4628         nstats->rx_missed_errors = estats->xxoverflow_discard;
4629
4630         nstats->rx_errors = nstats->rx_length_errors +
4631                             nstats->rx_over_errors +
4632                             nstats->rx_crc_errors +
4633                             nstats->rx_frame_errors +
4634                             nstats->rx_fifo_errors +
4635                             nstats->rx_missed_errors;
4636
4637         nstats->tx_aborted_errors =
4638                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4639                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4640         nstats->tx_carrier_errors =
4641                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4642         nstats->tx_fifo_errors = 0;
4643         nstats->tx_heartbeat_errors = 0;
4644         nstats->tx_window_errors = 0;
4645
4646         nstats->tx_errors = nstats->tx_aborted_errors +
4647                             nstats->tx_carrier_errors +
4648             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4649 }
4650
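/* Re-derive the driver-only counters by summing the per-queue values;
 * these are maintained in software by the fast path rather than by
 * hardware or firmware.
 */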
4651 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4652 {
4653         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4654         int i;
4655
4656         estats->driver_xoff = 0;
4657         estats->rx_err_discard_pkt = 0;
4658         estats->rx_skb_alloc_failed = 0;
4659         estats->hw_csum_err = 0;
4660         for_each_queue(bp, i) {
4661                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4662
4663                 estats->driver_xoff += qstats->driver_xoff;
4664                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4665                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4666                 estats->hw_csum_err += qstats->hw_csum_err;
4667         }
4668 }
4669
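/* Handler for the periodic UPDATE event: runs only once the previous
 * DMAE chain has completed, refreshes the hardware stats (PMF only)
 * and the storm stats, panics if the firmware failed to update them
 * several times in a row, regenerates the netdev/driver views and
 * finally re-posts both queries for the next interval.
 */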
4670 static void bnx2x_stats_update(struct bnx2x *bp)
4671 {
4672         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4673
4674         if (*stats_comp != DMAE_COMP_VAL)
4675                 return;
4676
4677         if (bp->port.pmf)
4678                 bnx2x_hw_stats_update(bp);
4679
4680         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4681                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4682                 bnx2x_panic();
4683                 return;
4684         }
4685
4686         bnx2x_net_stats_update(bp);
4687         bnx2x_drv_stats_update(bp);
4688
4689         if (netif_msg_timer(bp)) {
4690                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4691                 int i;
4692
4693                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4694                        bp->dev->name,
4695                        estats->brb_drop_lo, estats->brb_truncate_lo);
4696
4697                 for_each_queue(bp, i) {
4698                         struct bnx2x_fastpath *fp = &bp->fp[i];
4699                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4700
4701                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4702                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4703                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4704                                fp->rx_comp_cons),
4705                                le16_to_cpu(*fp->rx_cons_sb),
4706                                bnx2x_hilo(&qstats->
4707                                           total_unicast_packets_received_hi),
4708                                fp->rx_calls, fp->rx_pkt);
4709                 }
4710
4711                 for_each_queue(bp, i) {
4712                         struct bnx2x_fastpath *fp = &bp->fp[i];
4713                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4714                         struct netdev_queue *txq =
4715                                 netdev_get_tx_queue(bp->dev, i);
4716
4717                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4718                                           "  tx pkt(%lu) tx calls (%lu)"
4719                                           "  %s (Xoff events %u)\n",
4720                                fp->name, bnx2x_tx_avail(fp),
4721                                le16_to_cpu(*fp->tx_cons_sb),
4722                                bnx2x_hilo(&qstats->
4723                                           total_unicast_packets_transmitted_hi),
4724                                fp->tx_pkt,
4725                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4726                                qstats->driver_xoff);
4727                 }
4728         }
4729
4730         bnx2x_hw_stats_post(bp);
4731         bnx2x_storm_stats_post(bp);
4732 }
4733
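/* Final flush on statistics stop: DMAE the port and/or function
 * statistics blocks back out to the MCP's memory.  When both exist,
 * the two commands are chained through the loader register so that
 * only the last one writes the stats_comp completion value.
 */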
4734 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4735 {
4736         struct dmae_command *dmae;
4737         u32 opcode;
4738         int loader_idx = PMF_DMAE_C(bp);
4739         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4740
4741         bp->executer_idx = 0;
4742
4743         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4744                   DMAE_CMD_C_ENABLE |
4745                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4746 #ifdef __BIG_ENDIAN
4747                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4748 #else
4749                   DMAE_CMD_ENDIANITY_DW_SWAP |
4750 #endif
4751                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4752                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4753
4754         if (bp->port.port_stx) {
4755
4756                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4757                 if (bp->func_stx)
4758                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4759                 else
4760                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4761                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4762                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4763                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4764                 dmae->dst_addr_hi = 0;
4765                 dmae->len = sizeof(struct host_port_stats) >> 2;
4766                 if (bp->func_stx) {
4767                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4768                         dmae->comp_addr_hi = 0;
4769                         dmae->comp_val = 1;
4770                 } else {
4771                         dmae->comp_addr_lo =
4772                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4773                         dmae->comp_addr_hi =
4774                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4775                         dmae->comp_val = DMAE_COMP_VAL;
4776
4777                         *stats_comp = 0;
4778                 }
4779         }
4780
4781         if (bp->func_stx) {
4782
4783                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4784                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4785                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4786                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4787                 dmae->dst_addr_lo = bp->func_stx >> 2;
4788                 dmae->dst_addr_hi = 0;
4789                 dmae->len = sizeof(struct host_func_stats) >> 2;
4790                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4791                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4792                 dmae->comp_val = DMAE_COMP_VAL;
4793
4794                 *stats_comp = 0;
4795         }
4796 }
4797
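/* Handler for the STOP event: wait out any in-flight DMAE, take one
 * last snapshot of the hardware and storm statistics, and if either
 * succeeded push the final numbers to the stack and to management.
 */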
4798 static void bnx2x_stats_stop(struct bnx2x *bp)
4799 {
4800         int update = 0;
4801
4802         bnx2x_stats_comp(bp);
4803
4804         if (bp->port.pmf)
4805                 update = (bnx2x_hw_stats_update(bp) == 0);
4806
4807         update |= (bnx2x_storm_stats_update(bp) == 0);
4808
4809         if (update) {
4810                 bnx2x_net_stats_update(bp);
4811
4812                 if (bp->port.pmf)
4813                         bnx2x_port_stats_stop(bp);
4814
4815                 bnx2x_hw_stats_post(bp);
4816                 bnx2x_stats_comp(bp);
4817         }
4818 }
4819
4820 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4821 {
4822 }
4823
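/* Statistics state machine: indexed by [current state][event], each
 * entry names the action to run and the state to move to.
 */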
4824 static const struct {
4825         void (*action)(struct bnx2x *bp);
4826         enum bnx2x_stats_state next_state;
4827 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4828 /* state        event   */
4829 {
4830 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4831 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4832 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4833 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4834 },
4835 {
4836 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4837 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4838 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4839 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4840 }
4841 };
4842
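/* Single entry point for statistics events: look up and run the
 * action for the current state, then commit the state transition.
 */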
4843 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4844 {
4845         enum bnx2x_stats_state state = bp->stats_state;
4846
4847         if (unlikely(bp->panic))
4848                 return;
4849
4850         bnx2x_stats_stm[state][event].action(bp);
4851         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4852
4853         /* Make sure the stats_state update is visible before we go on */
4854         smp_wmb();
4855
4856         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4857                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4858                    state, event, bp->stats_state);
4859 }
4860
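/* PMF only: push the host port statistics block out to the MCP's
 * port_stx area and wait for the DMAE to complete.
 */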
4861 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4862 {
4863         struct dmae_command *dmae;
4864         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4865
4866         /* sanity */
4867         if (!bp->port.pmf || !bp->port.port_stx) {
4868                 BNX2X_ERR("BUG!\n");
4869                 return;
4870         }
4871
4872         bp->executer_idx = 0;
4873
4874         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4875         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4876                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4877                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4878 #ifdef __BIG_ENDIAN
4879                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4880 #else
4881                         DMAE_CMD_ENDIANITY_DW_SWAP |
4882 #endif
4883                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4884                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4885         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4886         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4887         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4888         dmae->dst_addr_hi = 0;
4889         dmae->len = sizeof(struct host_port_stats) >> 2;
4890         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4891         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4892         dmae->comp_val = DMAE_COMP_VAL;
4893
4894         *stats_comp = 0;
4895         bnx2x_hw_stats_post(bp);
4896         bnx2x_stats_comp(bp);
4897 }
4898
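/* PMF only: walk every vn on this port and seed each function's
 * statistics area in MCP memory via bnx2x_func_stats_init(), saving
 * and restoring our own func_stx around the loop.
 */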
4899 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4900 {
4901         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4902         int port = BP_PORT(bp);
4903         int func;
4904         u32 func_stx;
4905
4906         /* sanity */
4907         if (!bp->port.pmf || !bp->func_stx) {
4908                 BNX2X_ERR("BUG!\n");
4909                 return;
4910         }
4911
4912         /* save our func_stx */
4913         func_stx = bp->func_stx;
4914
4915         for (vn = VN_0; vn < vn_max; vn++) {
4916                 func = 2*vn + port;
4917
4918                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4919                 bnx2x_func_stats_init(bp);
4920                 bnx2x_hw_stats_post(bp);
4921                 bnx2x_stats_comp(bp);
4922         }
4923
4924         /* restore our func_stx */
4925         bp->func_stx = func_stx;
4926 }
4927
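/* Non-PMF path: read the current function statistics block back from
 * the MCP's memory into func_stats_base (note the GRC -> PCI
 * direction, the reverse of bnx2x_func_stats_init()), so that later
 * updates continue from the values a previous driver or PMF left
 * behind.
 */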
4928 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4929 {
4930         struct dmae_command *dmae = &bp->stats_dmae;
4931         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4932
4933         /* sanity */
4934         if (!bp->func_stx) {
4935                 BNX2X_ERR("BUG!\n");
4936                 return;
4937         }
4938
4939         bp->executer_idx = 0;
4940         memset(dmae, 0, sizeof(struct dmae_command));
4941
4942         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4943                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4944                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4945 #ifdef __BIG_ENDIAN
4946                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4947 #else
4948                         DMAE_CMD_ENDIANITY_DW_SWAP |
4949 #endif
4950                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4951                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4952         dmae->src_addr_lo = bp->func_stx >> 2;
4953         dmae->src_addr_hi = 0;
4954         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4955         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4956         dmae->len = sizeof(struct host_func_stats) >> 2;
4957         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4958         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4959         dmae->comp_val = DMAE_COMP_VAL;
4960
4961         *stats_comp = 0;
4962         bnx2x_hw_stats_post(bp);
4963         bnx2x_stats_comp(bp);
4964 }
4965
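/* One-time statistics setup on nic load: fetch the port/function
 * stats addresses from shared memory (zero when there is no MCP),
 * snapshot the NIG counters as the baseline for future deltas, clear
 * all per-queue history and, depending on PMF state, seed the
 * management statistics areas.
 */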
4966 static void bnx2x_stats_init(struct bnx2x *bp)
4967 {
4968         int port = BP_PORT(bp);
4969         int func = BP_FUNC(bp);
4970         int i;
4971
4972         bp->stats_pending = 0;
4973         bp->executer_idx = 0;
4974         bp->stats_counter = 0;
4975
4976         /* port and func stats for management */
4977         if (!BP_NOMCP(bp)) {
4978                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4979                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4980
4981         } else {
4982                 bp->port.port_stx = 0;
4983                 bp->func_stx = 0;
4984         }
4985         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4986            bp->port.port_stx, bp->func_stx);
4987
4988         /* port stats */
4989         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4990         bp->port.old_nig_stats.brb_discard =
4991                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4992         bp->port.old_nig_stats.brb_truncate =
4993                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4994         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4995                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4996         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4997                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4998
4999         /* function stats */
5000         for_each_queue(bp, i) {
5001                 struct bnx2x_fastpath *fp = &bp->fp[i];
5002
5003                 memset(&fp->old_tclient, 0,
5004                        sizeof(struct tstorm_per_client_stats));
5005                 memset(&fp->old_uclient, 0,
5006                        sizeof(struct ustorm_per_client_stats));
5007                 memset(&fp->old_xclient, 0,
5008                        sizeof(struct xstorm_per_client_stats));
5009                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5010         }
5011
5012         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5013         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5014
5015         bp->stats_state = STATS_STATE_DISABLED;
5016
5017         if (bp->port.pmf) {
5018                 if (bp->port.port_stx)
5019                         bnx2x_port_stats_base_init(bp);
5020
5021                 if (bp->func_stx)
5022                         bnx2x_func_stats_base_init(bp);
5023
5024         } else if (bp->func_stx)
5025                 bnx2x_func_stats_base_update(bp);
5026 }
5027
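/* Periodic driver timer: in poll mode services the rx/tx rings of
 * queue 0 directly, exchanges the driver/MCP heartbeat pulse
 * (complaining if the sequence numbers drift apart), generates the
 * stats UPDATE event while the device is open, and re-arms itself.
 */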
5028 static void bnx2x_timer(unsigned long data)
5029 {
5030         struct bnx2x *bp = (struct bnx2x *) data;
5031
5032         if (!netif_running(bp->dev))
5033                 return;
5034
5035         if (atomic_read(&bp->intr_sem) != 0)
5036                 goto timer_restart;
5037
5038         if (poll) {
5039                 struct bnx2x_fastpath *fp = &bp->fp[0];
5040                 int rc;
5041
5042                 bnx2x_tx_int(fp);
5043                 rc = bnx2x_rx_int(fp, 1000);
5044         }
5045
5046         if (!BP_NOMCP(bp)) {
5047                 int func = BP_FUNC(bp);
5048                 u32 drv_pulse;
5049                 u32 mcp_pulse;
5050
5051                 ++bp->fw_drv_pulse_wr_seq;
5052                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5053                 /* TBD - add SYSTEM_TIME */
5054                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5055                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5056
5057                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5058                              MCP_PULSE_SEQ_MASK);
5059                 /* The delta between driver pulse and mcp response
5060                  * should be 1 (before mcp response) or 0 (after mcp response)
5061                  */
5062                 if ((drv_pulse != mcp_pulse) &&
5063                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5064                         /* someone lost a heartbeat... */
5065                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5066                                   drv_pulse, mcp_pulse);
5067                 }
5068         }
5069
5070         if (bp->state == BNX2X_STATE_OPEN)
5071                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5072
5073 timer_restart:
5074         mod_timer(&bp->timer, jiffies + bp->current_interval);
5075 }
5076
5077 /* end of Statistics */
5078
5079 /* nic init */
5080
5081 /*
5082  * nic init service functions
5083  */
5084
5085 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5086 {
5087         int port = BP_PORT(bp);
5088
5089         /* "CSTORM" */
5090         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5091                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5092                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5093         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5094                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5095                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5096 }
5097
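/* Program a per-queue (non-default) status block: publish the host
 * addresses of the USTORM and CSTORM sections (both are programmed
 * through CSTORM internal memory here), disable host coalescing on
 * every index until bnx2x_update_coalesce() configures it, and enable
 * the IGU line with an ack.
 */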
5098 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5099                           dma_addr_t mapping, int sb_id)
5100 {
5101         int port = BP_PORT(bp);
5102         int func = BP_FUNC(bp);
5103         int index;
5104         u64 section;
5105
5106         /* USTORM */
5107         section = ((u64)mapping) + offsetof(struct host_status_block,
5108                                             u_status_block);
5109         sb->u_status_block.status_block_id = sb_id;
5110
5111         REG_WR(bp, BAR_CSTRORM_INTMEM +
5112                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5113         REG_WR(bp, BAR_CSTRORM_INTMEM +
5114                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5115                U64_HI(section));
5116         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5117                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5118
5119         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5120                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5121                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5122
5123         /* CSTORM */
5124         section = ((u64)mapping) + offsetof(struct host_status_block,
5125                                             c_status_block);
5126         sb->c_status_block.status_block_id = sb_id;
5127
5128         REG_WR(bp, BAR_CSTRORM_INTMEM +
5129                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5130         REG_WR(bp, BAR_CSTRORM_INTMEM +
5131                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5132                U64_HI(section));
5133         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5134                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5135
5136         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5137                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5138                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5139
5140         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5141 }
5142
5143 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5144 {
5145         int func = BP_FUNC(bp);
5146
5147         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5148                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5149                         sizeof(struct tstorm_def_status_block)/4);
5150         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5151                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5152                         sizeof(struct cstorm_def_status_block_u)/4);
5153         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5154                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5155                         sizeof(struct cstorm_def_status_block_c)/4);
5156         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5157                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5158                         sizeof(struct xstorm_def_status_block)/4);
5159 }
5160
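/* Program the default status block: cache the AEU enable masks for
 * each dynamic attention group, point the HC attention message
 * registers at the attention section, publish the per-storm
 * (U/C/T/X) sections with their coalescing indices disabled, then
 * clear the pending flags and enable the IGU line.
 */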
5161 static void bnx2x_init_def_sb(struct bnx2x *bp,
5162                               struct host_def_status_block *def_sb,
5163                               dma_addr_t mapping, int sb_id)
5164 {
5165         int port = BP_PORT(bp);
5166         int func = BP_FUNC(bp);
5167         int index, val, reg_offset;
5168         u64 section;
5169
5170         /* ATTN */
5171         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5172                                             atten_status_block);
5173         def_sb->atten_status_block.status_block_id = sb_id;
5174
5175         bp->attn_state = 0;
5176
5177         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5178                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5179
5180         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5181                 bp->attn_group[index].sig[0] = REG_RD(bp,
5182                                                      reg_offset + 0x10*index);
5183                 bp->attn_group[index].sig[1] = REG_RD(bp,
5184                                                reg_offset + 0x4 + 0x10*index);
5185                 bp->attn_group[index].sig[2] = REG_RD(bp,
5186                                                reg_offset + 0x8 + 0x10*index);
5187                 bp->attn_group[index].sig[3] = REG_RD(bp,
5188                                                reg_offset + 0xc + 0x10*index);
5189         }
5190
5191         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5192                              HC_REG_ATTN_MSG0_ADDR_L);
5193
5194         REG_WR(bp, reg_offset, U64_LO(section));
5195         REG_WR(bp, reg_offset + 4, U64_HI(section));
5196
5197         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5198
5199         val = REG_RD(bp, reg_offset);
5200         val |= sb_id;
5201         REG_WR(bp, reg_offset, val);
5202
5203         /* USTORM */
5204         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5205                                             u_def_status_block);
5206         def_sb->u_def_status_block.status_block_id = sb_id;
5207
5208         REG_WR(bp, BAR_CSTRORM_INTMEM +
5209                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5210         REG_WR(bp, BAR_CSTRORM_INTMEM +
5211                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5212                U64_HI(section));
5213         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5214                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5215
5216         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5217                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5218                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5219
5220         /* CSTORM */
5221         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5222                                             c_def_status_block);
5223         def_sb->c_def_status_block.status_block_id = sb_id;
5224
5225         REG_WR(bp, BAR_CSTRORM_INTMEM +
5226                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5227         REG_WR(bp, BAR_CSTRORM_INTMEM +
5228                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5229                U64_HI(section));
5230         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5231                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5232
5233         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5235                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5236
5237         /* TSTORM */
5238         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5239                                             t_def_status_block);
5240         def_sb->t_def_status_block.status_block_id = sb_id;
5241
5242         REG_WR(bp, BAR_TSTRORM_INTMEM +
5243                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5244         REG_WR(bp, BAR_TSTRORM_INTMEM +
5245                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5246                U64_HI(section));
5247         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5248                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5249
5250         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5251                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5252                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5253
5254         /* XSTORM */
5255         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5256                                             x_def_status_block);
5257         def_sb->x_def_status_block.status_block_id = sb_id;
5258
5259         REG_WR(bp, BAR_XSTRORM_INTMEM +
5260                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5261         REG_WR(bp, BAR_XSTRORM_INTMEM +
5262                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5263                U64_HI(section));
5264         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5265                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5266
5267         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5268                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5269                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5270
5271         bp->stats_pending = 0;
5272         bp->set_mac_pending = 0;
5273
5274         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5275 }
5276
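/* Apply the current rx/tx interrupt coalescing settings to every
 * queue's status block.  The tick values are scaled down by
 * 4 * BNX2X_BTR before being written; a resulting timeout of zero
 * leaves that index disabled.
 */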
5277 static void bnx2x_update_coalesce(struct bnx2x *bp)
5278 {
5279         int port = BP_PORT(bp);
5280         int i;
5281
5282         for_each_queue(bp, i) {
5283                 int sb_id = bp->fp[i].sb_id;
5284
5285                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5286                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5287                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5288                                                       U_SB_ETH_RX_CQ_INDEX),
5289                         bp->rx_ticks/(4 * BNX2X_BTR));
5290                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5291                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5292                                                        U_SB_ETH_RX_CQ_INDEX),
5293                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5294
5295                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5296                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5297                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5298                                                       C_SB_ETH_TX_CQ_INDEX),
5299                         bp->tx_ticks/(4 * BNX2X_BTR));
5300                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5301                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5302                                                        C_SB_ETH_TX_CQ_INDEX),
5303                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5304         }
5305 }
5306
5307 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5308                                        struct bnx2x_fastpath *fp, int last)
5309 {
5310         int i;
5311
5312         for (i = 0; i < last; i++) {
5313                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5314                 struct sk_buff *skb = rx_buf->skb;
5315
5316                 if (skb == NULL) {
5317                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5318                         continue;
5319                 }
5320
5321                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5322                         dma_unmap_single(&bp->pdev->dev,
5323                                          dma_unmap_addr(rx_buf, mapping),
5324                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5325
5326                 dev_kfree_skb(skb);
5327                 rx_buf->skb = NULL;
5328         }
5329 }
5330
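/* Set up all rx rings for nic load: size the rx buffers for the
 * current MTU, pre-allocate the per-queue TPA skb pool (falling back
 * to TPA-off for a queue on allocation failure), link the "next page"
 * elements of the SGE, BD and CQ rings into circles, then fill the
 * rings with buffers.
 */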
5331 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5332 {
5333         int func = BP_FUNC(bp);
5334         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5335                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5336         u16 ring_prod, cqe_ring_prod;
5337         int i, j;
5338
5339         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5340         DP(NETIF_MSG_IFUP,
5341            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5342
5343         if (bp->flags & TPA_ENABLE_FLAG) {
5344
5345                 for_each_queue(bp, j) {
5346                         struct bnx2x_fastpath *fp = &bp->fp[j];
5347
5348                         for (i = 0; i < max_agg_queues; i++) {
5349                                 fp->tpa_pool[i].skb =
5350                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5351                                 if (!fp->tpa_pool[i].skb) {
5352                                         BNX2X_ERR("Failed to allocate TPA "
5353                                                   "skb pool for queue[%d] - "
5354                                                   "disabling TPA on this "
5355                                                   "queue!\n", j);
5356                                         bnx2x_free_tpa_pool(bp, fp, i);
5357                                         fp->disable_tpa = 1;
5358                                         break;
5359                                 }
5360                                 dma_unmap_addr_set((struct sw_rx_bd *)
5361                                                         &fp->tpa_pool[i],
5362                                                    mapping, 0);
5363                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5364                         }
5365                 }
5366         }
5367
5368         for_each_queue(bp, j) {
5369                 struct bnx2x_fastpath *fp = &bp->fp[j];
5370
5371                 fp->rx_bd_cons = 0;
5372                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5373                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5374
5375                 /* "next page" elements initialization */
5376                 /* SGE ring */
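                     /* the last two SGE slots of each page are reserved
                      * for chaining; the 64-bit next-page address is
                      * written into the first of them, hence the
                      * RX_SGE_CNT * i - 2 index */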
5377                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5378                         struct eth_rx_sge *sge;
5379
5380                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5381                         sge->addr_hi =
5382                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5383                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5384                         sge->addr_lo =
5385                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5386                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5387                 }
5388
5389                 bnx2x_init_sge_ring_bit_mask(fp);
5390
5391                 /* RX BD ring */
5392                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5393                         struct eth_rx_bd *rx_bd;
5394
5395                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5396                         rx_bd->addr_hi =
5397                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5398                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5399                         rx_bd->addr_lo =
5400                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5401                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5402                 }
5403
5404                 /* CQ ring */
5405                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5406                         struct eth_rx_cqe_next_page *nextpg;
5407
5408                         nextpg = (struct eth_rx_cqe_next_page *)
5409                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5410                         nextpg->addr_hi =
5411                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5412                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5413                         nextpg->addr_lo =
5414                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5415                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5416                 }
5417
5418                 /* Allocate SGEs and initialize the ring elements */
5419                 for (i = 0, ring_prod = 0;
5420                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5421
5422                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5423                                 BNX2X_ERR("was only able to allocate "
5424                                           "%d rx sges\n", i);
5425                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5426                                 /* Cleanup already allocated elements */
5427                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5428                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5429                                 fp->disable_tpa = 1;
5430                                 ring_prod = 0;
5431                                 break;
5432                         }
5433                         ring_prod = NEXT_SGE_IDX(ring_prod);
5434                 }
5435                 fp->rx_sge_prod = ring_prod;
5436
5437                 /* Allocate BDs and initialize BD ring */
5438                 fp->rx_comp_cons = 0;
5439                 cqe_ring_prod = ring_prod = 0;
5440                 for (i = 0; i < bp->rx_ring_size; i++) {
5441                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5442                                 BNX2X_ERR("was only able to allocate "
5443                                           "%d rx skbs on queue[%d]\n", i, j);
5444                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5445                                 break;
5446                         }
5447                         ring_prod = NEXT_RX_IDX(ring_prod);
5448                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5449                         WARN_ON(ring_prod <= i);
5450                 }
5451
5452                 fp->rx_bd_prod = ring_prod;
5453                 /* must not have more available CQEs than BDs */
5454                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5455                                          cqe_ring_prod);
5456                 fp->rx_pkt = fp->rx_calls = 0;
5457
5458                 /* Warning!
5459                  * This will generate an interrupt (to the TSTORM);
5460                  * it must only be done after the chip is initialized.
5461                  */
5462                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5463                                      fp->rx_sge_prod);
5464                 if (j != 0)
5465                         continue;
5466
5467                 REG_WR(bp, BAR_USTRORM_INTMEM +
5468                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5469                        U64_LO(fp->rx_comp_mapping));
5470                 REG_WR(bp, BAR_USTRORM_INTMEM +
5471                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5472                        U64_HI(fp->rx_comp_mapping));
5473         }
5474 }
5475
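     /* Set up the Tx BD ring of every queue: chain the pages through the
      * 'next_bd' element that terminates each page, reset the
      * producer/consumer indices and prepare the doorbell data word.
      */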
5476 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5477 {
5478         int i, j;
5479
5480         for_each_queue(bp, j) {
5481                 struct bnx2x_fastpath *fp = &bp->fp[j];
5482
5483                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5484                         struct eth_tx_next_bd *tx_next_bd =
5485                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5486
5487                         tx_next_bd->addr_hi =
5488                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5489                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5490                         tx_next_bd->addr_lo =
5491                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5492                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5493                 }
5494
5495                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5496                 fp->tx_db.data.zero_fill1 = 0;
5497                 fp->tx_db.data.prod = 0;
5498
5499                 fp->tx_pkt_prod = 0;
5500                 fp->tx_pkt_cons = 0;
5501                 fp->tx_bd_prod = 0;
5502                 fp->tx_bd_cons = 0;
5503                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5504                 fp->tx_pkt = 0;
5505         }
5506 }
5507
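     /* Slowpath queue init: a single page of slowpath elements whose base
      * address and initial producer index are published to the XSTORM.
      */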
5508 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5509 {
5510         int func = BP_FUNC(bp);
5511
5512         spin_lock_init(&bp->spq_lock);
5513
5514         bp->spq_left = MAX_SPQ_PENDING;
5515         bp->spq_prod_idx = 0;
5516         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5517         bp->spq_prod_bd = bp->spq;
5518         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5519
5520         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5521                U64_LO(bp->spq_mapping));
5522         REG_WR(bp,
5523                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5524                U64_HI(bp->spq_mapping));
5525
5526         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5527                bp->spq_prod_idx);
5528 }
5529
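     /* Fill the per-connection ETH context: the USTORM part describes the
      * Rx side (client id, buffer sizes, BD/SGE page bases, TPA flags),
      * the CSTORM/XSTORM parts describe the Tx side, and the CDU fields
      * carry the connection-validation values.
      */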
5530 static void bnx2x_init_context(struct bnx2x *bp)
5531 {
5532         int i;
5533
5534         /* Rx */
5535         for_each_queue(bp, i) {
5536                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5537                 struct bnx2x_fastpath *fp = &bp->fp[i];
5538                 u8 cl_id = fp->cl_id;
5539
5540                 context->ustorm_st_context.common.sb_index_numbers =
5541                                                 BNX2X_RX_SB_INDEX_NUM;
5542                 context->ustorm_st_context.common.clientId = cl_id;
5543                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5544                 context->ustorm_st_context.common.flags =
5545                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5546                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5547                 context->ustorm_st_context.common.statistics_counter_id =
5548                                                 cl_id;
5549                 context->ustorm_st_context.common.mc_alignment_log_size =
5550                                                 BNX2X_RX_ALIGN_SHIFT;
5551                 context->ustorm_st_context.common.bd_buff_size =
5552                                                 bp->rx_buf_size;
5553                 context->ustorm_st_context.common.bd_page_base_hi =
5554                                                 U64_HI(fp->rx_desc_mapping);
5555                 context->ustorm_st_context.common.bd_page_base_lo =
5556                                                 U64_LO(fp->rx_desc_mapping);
5557                 if (!fp->disable_tpa) {
5558                         context->ustorm_st_context.common.flags |=
5559                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5560                         context->ustorm_st_context.common.sge_buff_size =
5561                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5562                                            0xffff);
5563                         context->ustorm_st_context.common.sge_page_base_hi =
5564                                                 U64_HI(fp->rx_sge_mapping);
5565                         context->ustorm_st_context.common.sge_page_base_lo =
5566                                                 U64_LO(fp->rx_sge_mapping);
5567
5568                         context->ustorm_st_context.common.max_sges_for_packet =
5569                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5570                         context->ustorm_st_context.common.max_sges_for_packet =
5571                                 ((context->ustorm_st_context.common.
5572                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5573                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5574                 }
5575
5576                 context->ustorm_ag_context.cdu_usage =
5577                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5578                                                CDU_REGION_NUMBER_UCM_AG,
5579                                                ETH_CONNECTION_TYPE);
5580
5581                 context->xstorm_ag_context.cdu_reserved =
5582                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5583                                                CDU_REGION_NUMBER_XCM_AG,
5584                                                ETH_CONNECTION_TYPE);
5585         }
5586
5587         /* Tx */
5588         for_each_queue(bp, i) {
5589                 struct bnx2x_fastpath *fp = &bp->fp[i];
5590                 struct eth_context *context =
5591                         bnx2x_sp(bp, context[i].eth);
5592
5593                 context->cstorm_st_context.sb_index_number =
5594                                                 C_SB_ETH_TX_CQ_INDEX;
5595                 context->cstorm_st_context.status_block_id = fp->sb_id;
5596
5597                 context->xstorm_st_context.tx_bd_page_base_hi =
5598                                                 U64_HI(fp->tx_desc_mapping);
5599                 context->xstorm_st_context.tx_bd_page_base_lo =
5600                                                 U64_LO(fp->tx_desc_mapping);
5601                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5602                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5603         }
5604 }
5605
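     /* RSS indirection table: spread the table entries round-robin over
      * the client ids of the active queues.
      */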
5606 static void bnx2x_init_ind_table(struct bnx2x *bp)
5607 {
5608         int func = BP_FUNC(bp);
5609         int i;
5610
5611         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5612                 return;
5613
5614         DP(NETIF_MSG_IFUP,
5615            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5616         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5617                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5618                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5619                         bp->fp->cl_id + (i % bp->num_queues));
5620 }
5621
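     /* Program the per-client TSTORM configuration (MTU, statistics and
      * VLAN/E1HOV stripping flags) for every queue's client id.
      */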
5622 static void bnx2x_set_client_config(struct bnx2x *bp)
5623 {
5624         struct tstorm_eth_client_config tstorm_client = {0};
5625         int port = BP_PORT(bp);
5626         int i;
5627
5628         tstorm_client.mtu = bp->dev->mtu;
5629         tstorm_client.config_flags =
5630                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5631                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5632 #ifdef BCM_VLAN
5633         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5634                 tstorm_client.config_flags |=
5635                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5636                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5637         }
5638 #endif
5639
5640         for_each_queue(bp, i) {
5641                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5642
5643                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5644                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5645                        ((u32 *)&tstorm_client)[0]);
5646                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5647                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5648                        ((u32 *)&tstorm_client)[1]);
5649         }
5650
5651         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5652            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5653 }
5654
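     /* Translate the requested Rx mode into the TSTORM drop-all/accept-all
      * masks, and program the NIG LLH mask that controls which packet
      * classes are passed up to the host as well as to management.
      */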
5655 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5656 {
5657         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5658         int mode = bp->rx_mode;
5659         int mask = bp->rx_mode_cl_mask;
5660         int func = BP_FUNC(bp);
5661         int port = BP_PORT(bp);
5662         int i;
5663         /* All but management unicast packets should pass to the host as well */
5664         u32 llh_mask =
5665                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5666                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5667                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5668                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5669
5670         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5671
5672         switch (mode) {
5673         case BNX2X_RX_MODE_NONE: /* no Rx */
5674                 tstorm_mac_filter.ucast_drop_all = mask;
5675                 tstorm_mac_filter.mcast_drop_all = mask;
5676                 tstorm_mac_filter.bcast_drop_all = mask;
5677                 break;
5678
5679         case BNX2X_RX_MODE_NORMAL:
5680                 tstorm_mac_filter.bcast_accept_all = mask;
5681                 break;
5682
5683         case BNX2X_RX_MODE_ALLMULTI:
5684                 tstorm_mac_filter.mcast_accept_all = mask;
5685                 tstorm_mac_filter.bcast_accept_all = mask;
5686                 break;
5687
5688         case BNX2X_RX_MODE_PROMISC:
5689                 tstorm_mac_filter.ucast_accept_all = mask;
5690                 tstorm_mac_filter.mcast_accept_all = mask;
5691                 tstorm_mac_filter.bcast_accept_all = mask;
5692                 /* pass management unicast packets as well */
5693                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5694                 break;
5695
5696         default:
5697                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5698                 break;
5699         }
5700
5701         REG_WR(bp,
5702                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5703                llh_mask);
5704
5705         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5706                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5707                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5708                        ((u32 *)&tstorm_mac_filter)[i]);
5709
5710 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5711                    ((u32 *)&tstorm_mac_filter)[i]); */
5712         }
5713
5714         if (mode != BNX2X_RX_MODE_NONE)
5715                 bnx2x_set_client_config(bp);
5716 }
5717
5718 static void bnx2x_init_internal_common(struct bnx2x *bp)
5719 {
5720         int i;
5721
5722         /* Zero this manually as its initialization is
5723            currently missing in the initTool */
5724         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5725                 REG_WR(bp, BAR_USTRORM_INTMEM +
5726                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5727 }
5728
5729 static void bnx2x_init_internal_port(struct bnx2x *bp)
5730 {
5731         int port = BP_PORT(bp);
5732
5733         REG_WR(bp,
5734                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5735         REG_WR(bp,
5736                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5737         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5738         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5739 }
5740
5741 static void bnx2x_init_internal_func(struct bnx2x *bp)
5742 {
5743         struct tstorm_eth_function_common_config tstorm_config = {0};
5744         struct stats_indication_flags stats_flags = {0};
5745         int port = BP_PORT(bp);
5746         int func = BP_FUNC(bp);
5747         int i, j;
5748         u32 offset;
5749         u16 max_agg_size;
5750
5751         if (is_multi(bp)) {
5752                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5753                 tstorm_config.rss_result_mask = MULTI_MASK;
5754         }
5755
5756         /* Enable TPA if needed */
5757         if (bp->flags & TPA_ENABLE_FLAG)
5758                 tstorm_config.config_flags |=
5759                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5760
5761         if (IS_E1HMF(bp))
5762                 tstorm_config.config_flags |=
5763                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5764
5765         tstorm_config.leading_client_id = BP_L_ID(bp);
5766
5767         REG_WR(bp, BAR_TSTRORM_INTMEM +
5768                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5769                (*(u32 *)&tstorm_config));
5770
5771         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5772         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5773         bnx2x_set_storm_rx_mode(bp);
5774
5775         for_each_queue(bp, i) {
5776                 u8 cl_id = bp->fp[i].cl_id;
5777
5778                 /* reset xstorm per client statistics */
5779                 offset = BAR_XSTRORM_INTMEM +
5780                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5781                 for (j = 0;
5782                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5783                         REG_WR(bp, offset + j*4, 0);
5784
5785                 /* reset tstorm per client statistics */
5786                 offset = BAR_TSTRORM_INTMEM +
5787                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788                 for (j = 0;
5789                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5790                         REG_WR(bp, offset + j*4, 0);
5791
5792                 /* reset ustorm per client statistics */
5793                 offset = BAR_USTRORM_INTMEM +
5794                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795                 for (j = 0;
5796                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5797                         REG_WR(bp, offset + j*4, 0);
5798         }
5799
5800         /* Init statistics related context */
5801         stats_flags.collect_eth = 1;
5802
5803         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5804                ((u32 *)&stats_flags)[0]);
5805         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5806                ((u32 *)&stats_flags)[1]);
5807
5808         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5809                ((u32 *)&stats_flags)[0]);
5810         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5811                ((u32 *)&stats_flags)[1]);
5812
5813         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5814                ((u32 *)&stats_flags)[0]);
5815         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5816                ((u32 *)&stats_flags)[1]);
5817
5818         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5819                ((u32 *)&stats_flags)[0]);
5820         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5821                ((u32 *)&stats_flags)[1]);
5822
5823         REG_WR(bp, BAR_XSTRORM_INTMEM +
5824                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5825                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5826         REG_WR(bp, BAR_XSTRORM_INTMEM +
5827                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5828                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5829
5830         REG_WR(bp, BAR_TSTRORM_INTMEM +
5831                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833         REG_WR(bp, BAR_TSTRORM_INTMEM +
5834                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5836
5837         REG_WR(bp, BAR_USTRORM_INTMEM +
5838                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840         REG_WR(bp, BAR_USTRORM_INTMEM +
5841                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5843
5844         if (CHIP_IS_E1H(bp)) {
5845                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5846                         IS_E1HMF(bp));
5847                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5848                         IS_E1HMF(bp));
5849                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5850                         IS_E1HMF(bp));
5851                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5852                         IS_E1HMF(bp));
5853
5854                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5855                          bp->e1hov);
5856         }
5857
5858         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5859         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5860                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5861         for_each_queue(bp, i) {
5862                 struct bnx2x_fastpath *fp = &bp->fp[i];
5863
5864                 REG_WR(bp, BAR_USTRORM_INTMEM +
5865                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5866                        U64_LO(fp->rx_comp_mapping));
5867                 REG_WR(bp, BAR_USTRORM_INTMEM +
5868                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5869                        U64_HI(fp->rx_comp_mapping));
5870
5871                 /* Next page */
5872                 REG_WR(bp, BAR_USTRORM_INTMEM +
5873                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5874                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5875                 REG_WR(bp, BAR_USTRORM_INTMEM +
5876                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5877                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5878
5879                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5880                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5881                          max_agg_size);
5882         }
5883
5884         /* dropless flow control */
5885         if (CHIP_IS_E1H(bp)) {
5886                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5887
5888                 rx_pause.bd_thr_low = 250;
5889                 rx_pause.cqe_thr_low = 250;
5890                 rx_pause.cos = 1;
5891                 rx_pause.sge_thr_low = 0;
5892                 rx_pause.bd_thr_high = 350;
5893                 rx_pause.cqe_thr_high = 350;
5894                 rx_pause.sge_thr_high = 0;
5895
5896                 for_each_queue(bp, i) {
5897                         struct bnx2x_fastpath *fp = &bp->fp[i];
5898
5899                         if (!fp->disable_tpa) {
5900                                 rx_pause.sge_thr_low = 150;
5901                                 rx_pause.sge_thr_high = 250;
5902                         }
5903
5904
5905                         offset = BAR_USTRORM_INTMEM +
5906                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5907                                                                    fp->cl_id);
5908                         for (j = 0;
5909                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5910                              j++)
5911                                 REG_WR(bp, offset + j*4,
5912                                        ((u32 *)&rx_pause)[j]);
5913                 }
5914         }
5915
5916         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5917
5918         /* Init rate shaping and fairness contexts */
5919         if (IS_E1HMF(bp)) {
5920                 int vn;
5921
5922                 /* During init there is no active link
5923                    Until link is up, set link rate to 10Gbps */
5924                 bp->link_vars.line_speed = SPEED_10000;
5925                 bnx2x_init_port_minmax(bp);
5926
5927                 if (!BP_NOMCP(bp))
5928                         bp->mf_config =
5929                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5930                 bnx2x_calc_vn_weight_sum(bp);
5931
5932                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5933                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5934
5935                 /* Enable rate shaping and fairness */
5936                 bp->cmng.flags.cmng_enables |=
5937                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5938
5939         } else {
5940                 /* rate shaping and fairness are disabled */
5941                 DP(NETIF_MSG_IFUP,
5942                    "single function mode  minmax will be disabled\n");
5943         }
5944
5945
5946         /* Store cmng structures to internal memory */
5947         if (bp->port.pmf)
5948                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5949                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5950                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5951                                ((u32 *)(&bp->cmng))[i]);
5952 }
5953
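     /* The load_code returned by the MCP encodes how much of the chip this
      * function must initialize; the cases intentionally fall through, so
      * a COMMON load also performs the PORT and FUNCTION init.
      */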
5954 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5955 {
5956         switch (load_code) {
5957         case FW_MSG_CODE_DRV_LOAD_COMMON:
5958                 bnx2x_init_internal_common(bp);
5959                 /* no break */
5960
5961         case FW_MSG_CODE_DRV_LOAD_PORT:
5962                 bnx2x_init_internal_port(bp);
5963                 /* no break */
5964
5965         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5966                 bnx2x_init_internal_func(bp);
5967                 break;
5968
5969         default:
5970                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5971                 break;
5972         }
5973 }
5974
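     /* Top-level NIC init: per-queue status blocks first, then the default
      * status block, coalescing, the rings, contexts and internal storm
      * memories; interrupts are enabled only after all of the above.
      */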
5975 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5976 {
5977         int i;
5978
5979         for_each_queue(bp, i) {
5980                 struct bnx2x_fastpath *fp = &bp->fp[i];
5981
5982                 fp->bp = bp;
5983                 fp->state = BNX2X_FP_STATE_CLOSED;
5984                 fp->index = i;
5985                 fp->cl_id = BP_L_ID(bp) + i;
5986 #ifdef BCM_CNIC
5987                 fp->sb_id = fp->cl_id + 1;
5988 #else
5989                 fp->sb_id = fp->cl_id;
5990 #endif
5991                 DP(NETIF_MSG_IFUP,
5992                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5993                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5994                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5995                               fp->sb_id);
5996                 bnx2x_update_fpsb_idx(fp);
5997         }
5998
5999         /* ensure status block indices were read */
6000         rmb();
6001
6002
6003         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6004                           DEF_SB_ID);
6005         bnx2x_update_dsb_idx(bp);
6006         bnx2x_update_coalesce(bp);
6007         bnx2x_init_rx_rings(bp);
6008         bnx2x_init_tx_ring(bp);
6009         bnx2x_init_sp_ring(bp);
6010         bnx2x_init_context(bp);
6011         bnx2x_init_internal(bp, load_code);
6012         bnx2x_init_ind_table(bp);
6013         bnx2x_stats_init(bp);
6014
6015         /* At this point, we are ready for interrupts */
6016         atomic_set(&bp->intr_sem, 0);
6017
6018         /* flush all before enabling interrupts */
6019         mb();
6020         mmiowb();
6021
6022         bnx2x_int_enable(bp);
6023
6024         /* Check for SPIO5 */
6025         bnx2x_attn_int_deasserted0(bp,
6026                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6027                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6028 }
6029
6030 /* end of nic init */
6031
6032 /*
6033  * gzip service functions
6034  */
6035
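     /* Allocate the DMA-coherent output buffer and the zlib stream used
      * to decompress zipped firmware init data.
      */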
6036 static int bnx2x_gunzip_init(struct bnx2x *bp)
6037 {
6038         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6039                                             &bp->gunzip_mapping, GFP_KERNEL);
6040         if (bp->gunzip_buf  == NULL)
6041                 goto gunzip_nomem1;
6042
6043         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6044         if (bp->strm  == NULL)
6045                 goto gunzip_nomem2;
6046
6047         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6048                                       GFP_KERNEL);
6049         if (bp->strm->workspace == NULL)
6050                 goto gunzip_nomem3;
6051
6052         return 0;
6053
6054 gunzip_nomem3:
6055         kfree(bp->strm);
6056         bp->strm = NULL;
6057
6058 gunzip_nomem2:
6059         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6060                           bp->gunzip_mapping);
6061         bp->gunzip_buf = NULL;
6062
6063 gunzip_nomem1:
6064         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6065                " decompression\n");
6066         return -ENOMEM;
6067 }
6068
6069 static void bnx2x_gunzip_end(struct bnx2x *bp)
6070 {
6071         kfree(bp->strm->workspace);
6072
6073         kfree(bp->strm);
6074         bp->strm = NULL;
6075
6076         if (bp->gunzip_buf) {
6077                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6078                                   bp->gunzip_mapping);
6079                 bp->gunzip_buf = NULL;
6080         }
6081 }
6082
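     /* Inflate a gzip blob into bp->gunzip_buf: validate the fixed gzip
      * header, skip the optional embedded file name (FNAME flag), then
      * raw-inflate the deflate stream (negative windowBits means no zlib
      * header).  gunzip_outlen is converted to 32-bit words at the end.
      */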
6083 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6084 {
6085         int n, rc;
6086
6087         /* check gzip header */
6088         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6089                 BNX2X_ERR("Bad gzip header\n");
6090                 return -EINVAL;
6091         }
6092
6093         n = 10;
6094
6095 #define FNAME                           0x8
6096
6097         if (zbuf[3] & FNAME)
6098                 while ((zbuf[n++] != 0) && (n < len));
6099
6100         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6101         bp->strm->avail_in = len - n;
6102         bp->strm->next_out = bp->gunzip_buf;
6103         bp->strm->avail_out = FW_BUF_SIZE;
6104
6105         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6106         if (rc != Z_OK)
6107                 return rc;
6108
6109         rc = zlib_inflate(bp->strm, Z_FINISH);
6110         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6111                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6112                            bp->strm->msg);
6113
6114         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6115         if (bp->gunzip_outlen & 0x3)
6116                 netdev_err(bp->dev, "Firmware decompression error:"
6117                                     " gunzip_outlen (%d) not aligned\n",
6118                                 bp->gunzip_outlen);
6119         bp->gunzip_outlen >>= 2;
6120
6121         zlib_inflateEnd(bp->strm);
6122
6123         if (rc == Z_STREAM_END)
6124                 return 0;
6125
6126         return rc;
6127 }
6128
6129 /* nic load/unload */
6130
6131 /*
6132  * General service functions
6133  */
6134
6135 /* send a NIG loopback debug packet */
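     /* The packet is pushed through the NIG debug interface as three
      * dwords written via DMAE: two data dwords and a third that, per the
      * inline comments below, carries the SOP/EOP control flags.
      */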
6136 static void bnx2x_lb_pckt(struct bnx2x *bp)
6137 {
6138         u32 wb_write[3];
6139
6140         /* Ethernet source and destination addresses */
6141         wb_write[0] = 0x55555555;
6142         wb_write[1] = 0x55555555;
6143         wb_write[2] = 0x20;             /* SOP */
6144         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6145
6146         /* NON-IP protocol */
6147         wb_write[0] = 0x09000000;
6148         wb_write[1] = 0x55555555;
6149         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6150         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6151 }
6152
6153 /* Some of the internal memories
6154  * are not directly readable from the driver.
6155  * To test them we send debug packets.
6156  */
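     /* The test injects NIG loopback packets while the parser's neighbor
      * blocks are disabled, then checks the BRB octet counter and the PRS
      * packet counter at each step; each failure returns a distinct
      * negative code.
      */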
6157 static int bnx2x_int_mem_test(struct bnx2x *bp)
6158 {
6159         int factor;
6160         int count, i;
6161         u32 val = 0;
6162
6163         if (CHIP_REV_IS_FPGA(bp))
6164                 factor = 120;
6165         else if (CHIP_REV_IS_EMUL(bp))
6166                 factor = 200;
6167         else
6168                 factor = 1;
6169
6170         DP(NETIF_MSG_HW, "start part1\n");
6171
6172         /* Disable inputs of parser neighbor blocks */
6173         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6174         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6175         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6176         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6177
6178         /*  Write 0 to parser credits for CFC search request */
6179         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6180
6181         /* send Ethernet packet */
6182         bnx2x_lb_pckt(bp);
6183
6184         /* TODO: should the NIG statistics be reset here? */
6185         /* Wait until NIG register shows 1 packet of size 0x10 */
6186         count = 1000 * factor;
6187         while (count) {
6188
6189                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6190                 val = *bnx2x_sp(bp, wb_data[0]);
6191                 if (val == 0x10)
6192                         break;
6193
6194                 msleep(10);
6195                 count--;
6196         }
6197         if (val != 0x10) {
6198                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6199                 return -1;
6200         }
6201
6202         /* Wait until PRS register shows 1 packet */
6203         count = 1000 * factor;
6204         while (count) {
6205                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6206                 if (val == 1)
6207                         break;
6208
6209                 msleep(10);
6210                 count--;
6211         }
6212         if (val != 0x1) {
6213                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6214                 return -2;
6215         }
6216
6217         /* Reset and init BRB, PRS */
6218         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6219         msleep(50);
6220         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6221         msleep(50);
6222         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6223         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6224
6225         DP(NETIF_MSG_HW, "part2\n");
6226
6227         /* Disable inputs of parser neighbor blocks */
6228         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6229         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6230         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6231         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6232
6233         /* Write 0 to parser credits for CFC search request */
6234         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6235
6236         /* send 10 Ethernet packets */
6237         for (i = 0; i < 10; i++)
6238                 bnx2x_lb_pckt(bp);
6239
6240         /* Wait until the NIG register shows 10 + 1 packets of
6241            size 0x10 each, i.e. 11*0x10 = 0xb0 octets in total */
6242         count = 1000 * factor;
6243         while (count) {
6244
6245                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6246                 val = *bnx2x_sp(bp, wb_data[0]);
6247                 if (val == 0xb0)
6248                         break;
6249
6250                 msleep(10);
6251                 count--;
6252         }
6253         if (val != 0xb0) {
6254                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6255                 return -3;
6256         }
6257
6258         /* Wait until PRS register shows 2 packets */
6259         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6260         if (val != 2)
6261                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6262
6263         /* Write 1 to parser credits for CFC search request */
6264         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6265
6266         /* Wait until PRS register shows 3 packets */
6267         msleep(10 * factor);
6268         /* Check that the PRS packet counter has advanced to 3 */
6269         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6270         if (val != 3)
6271                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6272
6273         /* clear NIG EOP FIFO */
6274         for (i = 0; i < 11; i++)
6275                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6276         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6277         if (val != 1) {
6278                 BNX2X_ERR("clear of NIG failed\n");
6279                 return -4;
6280         }
6281
6282         /* Reset and init BRB, PRS, NIG */
6283         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6284         msleep(50);
6285         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6286         msleep(50);
6287         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6288         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6289 #ifndef BCM_CNIC
6290         /* set NIC mode */
6291         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6292 #endif
6293
6294         /* Enable inputs of parser neighbor blocks */
6295         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6296         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6297         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6298         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6299
6300         DP(NETIF_MSG_HW, "done\n");
6301
6302         return 0; /* OK */
6303 }
6304
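     /* Unmask (write 0 to) the per-block attention interrupt masks so that
      * HW errors raise attentions; PBF keeps bits 3 and 4 masked, and a
      * few SEM/MISC masks are deliberately left commented out.
      */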
6305 static void enable_blocks_attention(struct bnx2x *bp)
6306 {
6307         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6308         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6309         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6310         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6311         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6312         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6313         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6314         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6315         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6316 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6317 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6318         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6319         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6320         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6321 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6322 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6323         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6324         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6325         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6326         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6327 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6328 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6329         if (CHIP_REV_IS_FPGA(bp))
6330                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6331         else
6332                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6333         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6334         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6335         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6336 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6337 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6338         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6339         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6340 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6341         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
6342 }
6343
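     /* Per-block parity mask values written by enable_blocks_parity()
      * below: a set bit apparently masks that parity source, so 0x0
      * enables all parity attentions for the block, small values mask the
      * individual bits called out in the comments, and 0xffffffff masks
      * the block entirely.
      */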
6344 static const struct {
6345         u32 addr;
6346         u32 mask;
6347 } bnx2x_parity_mask[] = {
6348         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6349         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6350         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6351         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6352         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6353         {QM_REG_QM_PRTY_MASK, 0x0},
6354         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6355         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6356         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6357         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6358         {CDU_REG_CDU_PRTY_MASK, 0x0},
6359         {CFC_REG_CFC_PRTY_MASK, 0x0},
6360         {DBG_REG_DBG_PRTY_MASK, 0x0},
6361         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6362         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6363         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6364         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6365         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6366         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6367         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6369         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6370         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6371         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6372         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6373         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6374         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6375         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6376 };
6377
6378 static void enable_blocks_parity(struct bnx2x *bp)
6379 {
6380         int i;
6381         int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6382
6383         for (i = 0; i < mask_arr_len; i++)
6384                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6385                         bnx2x_parity_mask[i].mask);
6386 }
6387
6388
6389 static void bnx2x_reset_common(struct bnx2x *bp)
6390 {
6391         /* reset_common */
6392         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6393                0xd3ffff7f);
6394         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6395 }
6396
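     /* Derive the PXP arbiter orders from the PCIe DEVCTL register: write
      * order from the max payload size, read order from the max read
      * request size unless overridden via the mrrs module parameter
      * (bp->mrrs).
      */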
6397 static void bnx2x_init_pxp(struct bnx2x *bp)
6398 {
6399         u16 devctl;
6400         int r_order, w_order;
6401
6402         pci_read_config_word(bp->pdev,
6403                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6404         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6405         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6406         if (bp->mrrs == -1)
6407                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6408         else {
6409                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6410                 r_order = bp->mrrs;
6411         }
6412
6413         bnx2x_init_pxp_arb(bp, r_order, w_order);
6414 }
6415
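     /* Fan failure is signalled through SPIO5: enable the attention when
      * the shmem config requests it explicitly, or when one of the PHY
      * types that needs it (SFX7101/BCM8727/BCM8481) sits on either port.
      */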
6416 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6417 {
6418         int is_required;
6419         u32 val;
6420         int port;
6421
6422         if (BP_NOMCP(bp))
6423                 return;
6424
6425         is_required = 0;
6426         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6427               SHARED_HW_CFG_FAN_FAILURE_MASK;
6428
6429         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6430                 is_required = 1;
6431
6432         /*
6433          * The fan failure mechanism is usually related to the PHY type since
6434          * the power consumption of the board is affected by the PHY. Currently,
6435          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6436          */
6437         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6438                 for (port = PORT_0; port < PORT_MAX; port++) {
6439                         u32 phy_type =
6440                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6441                                          external_phy_config) &
6442                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6443                         is_required |=
6444                                 ((phy_type ==
6445                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6446                                  (phy_type ==
6447                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6448                                  (phy_type ==
6449                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6450                 }
6451
6452         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6453
6454         if (is_required == 0)
6455                 return;
6456
6457         /* Fan failure is indicated by SPIO 5 */
6458         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6459                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6460
6461         /* set to active low mode */
6462         val = REG_RD(bp, MISC_REG_SPIO_INT);
6463         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6464                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6465         REG_WR(bp, MISC_REG_SPIO_INT, val);
6466
6467         /* enable interrupt to signal the IGU */
6468         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6469         val |= (1 << MISC_REGISTERS_SPIO_5);
6470         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6471 }
6472
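     /* One-time whole-chip init, presumably run only by the function that
      * received FW_MSG_CODE_DRV_LOAD_COMMON: take all HW blocks out of
      * reset and run their COMMON_STAGE init sequence.
      */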
6473 static int bnx2x_init_common(struct bnx2x *bp)
6474 {
6475         u32 val, i;
6476 #ifdef BCM_CNIC
6477         u32 wb_write[2];
6478 #endif
6479
6480         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6481
6482         bnx2x_reset_common(bp);
6483         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6484         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6485
6486         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6487         if (CHIP_IS_E1H(bp))
6488                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6489
6490         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6491         msleep(30);
6492         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6493
6494         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6495         if (CHIP_IS_E1(bp)) {
6496                 /* enable HW interrupt from PXP on USDM overflow
6497                    bit 16 on INT_MASK_0 */
6498                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6499         }
6500
6501         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6502         bnx2x_init_pxp(bp);
6503
6504 #ifdef __BIG_ENDIAN
6505         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6506         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6507         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6508         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6509         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6510         /* make sure this value is 0 */
6511         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6512
6513 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6514         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6515         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6516         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6517         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6518 #endif
6519
6520         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6521 #ifdef BCM_CNIC
6522         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6523         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6524         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6525 #endif
6526
6527         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6528                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6529
6530         /* let the HW do its magic ... */
6531         msleep(100);
6532         /* finish PXP init */
6533         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6534         if (val != 1) {
6535                 BNX2X_ERR("PXP2 CFG failed\n");
6536                 return -EBUSY;
6537         }
6538         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6539         if (val != 1) {
6540                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6541                 return -EBUSY;
6542         }
6543
6544         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6545         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6546
6547         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6548
6549         /* clean the DMAE memory */
6550         bp->dmae_ready = 1;
6551         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6552
6553         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6554         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6555         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6556         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6557
6558         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6559         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6560         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6561         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6562
6563         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6564
6565 #ifdef BCM_CNIC
6566         wb_write[0] = 0;
6567         wb_write[1] = 0;
6568         for (i = 0; i < 64; i++) {
6569                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6570                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6571
6572                 if (CHIP_IS_E1H(bp)) {
6573                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6574                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6575                                           wb_write, 2);
6576                 }
6577         }
6578 #endif
6579         /* soft reset pulse */
6580         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6581         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6582
6583 #ifdef BCM_CNIC
6584         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6585 #endif
6586
6587         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6588         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6589         if (!CHIP_REV_IS_SLOW(bp)) {
6590                 /* enable hw interrupt from doorbell Q */
6591                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6592         }
6593
6594         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6595         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6596         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6597 #ifndef BCM_CNIC
6598         /* set NIC mode */
6599         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6600 #endif
6601         if (CHIP_IS_E1H(bp))
6602                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6603
6604         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6605         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6606         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6607         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6608
6609         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6610         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6611         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6612         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6613
6614         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6615         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6616         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6617         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6618
6619         /* sync semi rtc */
6620         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6621                0x80000000);
6622         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6623                0x80000000);
6624
6625         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6626         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6627         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6628
6629         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6630         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6631                 REG_WR(bp, i, 0xc0cac01a);
6632                 /* TODO: replace with something meaningful */
6633         }
6634         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6635 #ifdef BCM_CNIC
6636         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6637         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6638         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6639         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6640         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6641         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6642         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6643         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6644         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6645         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6646 #endif
6647         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6648
6649         if (sizeof(union cdu_context) != 1024)
6650                 /* we currently assume that a context is 1024 bytes */
6651                 dev_alert(&bp->pdev->dev, "please adjust the size "
6652                                           "of cdu_context(%ld)\n",
6653                          (long)sizeof(union cdu_context));
6654
6655         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6656         val = (4 << 24) + (0 << 12) + 1024;
6657         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6658
6659         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6660         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6661         /* enable context validation interrupt from CFC */
6662         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6663
6664         /* set the thresholds to prevent CFC/CDU race */
6665         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6666
6667         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6668         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6669
6670         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6671         /* Reset PCIE errors for debug */
6672         REG_WR(bp, 0x2814, 0xffffffff);
6673         REG_WR(bp, 0x3820, 0xffffffff);
6674
6675         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6676         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6677         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6678         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6679
6680         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6681         if (CHIP_IS_E1H(bp)) {
6682                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6683                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6684         }
6685
6686         if (CHIP_REV_IS_SLOW(bp))
6687                 msleep(200);
6688
6689         /* finish CFC init */
6690         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6691         if (val != 1) {
6692                 BNX2X_ERR("CFC LL_INIT failed\n");
6693                 return -EBUSY;
6694         }
6695         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6696         if (val != 1) {
6697                 BNX2X_ERR("CFC AC_INIT failed\n");
6698                 return -EBUSY;
6699         }
6700         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6701         if (val != 1) {
6702                 BNX2X_ERR("CFC CAM_INIT failed\n");
6703                 return -EBUSY;
6704         }
6705         REG_WR(bp, CFC_REG_DEBUG0, 0);
6706
6707         /* read NIG statistic
6708            to see if this is our first up since powerup */
6709         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6710         val = *bnx2x_sp(bp, wb_data[0]);
6711
6712         /* do internal memory self test */
6713         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6714                 BNX2X_ERR("internal mem self test failed\n");
6715                 return -EBUSY;
6716         }
6717
6718         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6719         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6720         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6721         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6722         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6723                 bp->port.need_hw_lock = 1;
6724                 break;
6725
6726         default:
6727                 break;
6728         }
6729
6730         bnx2x_setup_fan_failure_detection(bp);
6731
6732         /* clear PXP2 attentions */
6733         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6734
6735         enable_blocks_attention(bp);
6736         if (CHIP_PARITY_SUPPORTED(bp))
6737                 enable_blocks_parity(bp);
6738
6739         if (!BP_NOMCP(bp)) {
6740                 bnx2x_acquire_phy_lock(bp);
6741                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6742                 bnx2x_release_phy_lock(bp);
6743         } else
6744                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6745
6746         return 0;
6747 }
6748
6749 static int bnx2x_init_port(struct bnx2x *bp)
6750 {
6751         int port = BP_PORT(bp);
6752         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6753         u32 low, high;
6754         u32 val;
6755
6756         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6757
6758         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6759
6760         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6761         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6762
6763         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6764         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6765         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6766         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6767
6768 #ifdef BCM_CNIC
6769         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6770
6771         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6772         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6773         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6774 #endif
6775
6776         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6777
6778         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6779         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6780                 /* no pause for emulation and FPGA */
6781                 low = 0;
6782                 high = 513;
6783         } else {
6784                 if (IS_E1HMF(bp))
6785                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6786                 else if (bp->dev->mtu > 4096) {
6787                         if (bp->flags & ONE_PORT_FLAG)
6788                                 low = 160;
6789                         else {
6790                                 val = bp->dev->mtu;
6791                                 /* (24*1024 + val*4)/256 */
6792                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6793                         }
6794                 } else
6795                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6796                 high = low + 56;        /* 14*1024/256 */
6797         }
6798         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6799         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
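        /* Worked example (illustrative, values assumed): on a two-port,
         * non-MF board with MTU 9000, val = 9000, so
         *   low  = 96 + 9000/64 + 1 = 237  (ceil((24*1024 + 9000*4)/256))
         *   high = 237 + 56        = 293  (56 = 14*1024/256)
         * where each unit is one 256-byte BRB quantum.
         */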
6800
6801
6802         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6803
6804         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6805         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6806         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6807         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6808
6809         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6810         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6811         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6812         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6813
6814         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6815         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6816
6817         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6818
6819         /* configure PBF to work without PAUSE (MTU 9000) */
6820         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6821
6822         /* update threshold */
6823         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6824         /* update init credit */
6825         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6826
6827         /* probe changes */
6828         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6829         msleep(5);
6830         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6831
6832 #ifdef BCM_CNIC
6833         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6834 #endif
6835         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6836         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6837
6838         if (CHIP_IS_E1(bp)) {
6839                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6840                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6841         }
6842         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6843
6844         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6845         /* init aeu_mask_attn_func_0/1:
6846          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6847          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6848          *             bits 4-7 are used for "per vn group attention" */
6849         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6850                (IS_E1HMF(bp) ? 0xF7 : 0x7));
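        /* e.g. (illustrative) MF mode writes 0xF7 = 11110111b (bit 3
         * masked, bits 0-2 and 4-7 enabled); SF mode writes
         * 0x7 = 00000111b (bits 0-2 only). */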
6851
6852         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6853         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6854         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6855         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6856         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6857
6858         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6859
6860         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6861
6862         if (CHIP_IS_E1H(bp)) {
6863                 /* 0x2 disable e1hov, 0x1 enable */
6864                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6865                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6866
6867                 {
6868                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6869                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6870                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6871                 }
6872         }
6873
6874         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6875         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6876
6877         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6878         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6879                 {
6880                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6881
6882                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6883                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6884
6885                 /* The GPIO should be swapped if the swap register is
6886                    set and active */
6887                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6888                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6889
6890                 /* Select function upon port-swap configuration */
6891                 if (port == 0) {
6892                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6893                         aeu_gpio_mask = (swap_val && swap_override) ?
6894                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6895                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6896                 } else {
6897                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6898                         aeu_gpio_mask = (swap_val && swap_override) ?
6899                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6900                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6901                 }
6902                 val = REG_RD(bp, offset);
6903                 /* add GPIO3 to group */
6904                 val |= aeu_gpio_mask;
6905                 REG_WR(bp, offset, val);
6906                 }
6907                 break;
6908
6909         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6910         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6911                 /* add SPIO 5 to group 0 */
6912                 {
6913                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6914                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6915                 val = REG_RD(bp, reg_addr);
6916                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6917                 REG_WR(bp, reg_addr, val);
6918                 }
6919                 break;
6920
6921         default:
6922                 break;
6923         }
6924
6925         bnx2x__link_reset(bp);
6926
6927         return 0;
6928 }
6929
6930 #define ILT_PER_FUNC            (768/2)
6931 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6932 /* the physical address is shifted right 12 bits and a valid bit (1)
6933    is added as the 53rd bit;
6934    since this is a wide register(TM)
6935    we split it into two 32 bit writes
6936  */
6937 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6938 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6939 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6940 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
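/* A worked example of the wide-register split above (illustrative only,
 * never compiled; the address value is assumed):
 */
#if 0
        u64 addr = 0x00123456789ab000ULL;       /* page-aligned DMA address */

        ONCHIP_ADDR1(addr);     /* == 0x456789ab: bits 12..43 of addr       */
        ONCHIP_ADDR2(addr);     /* == 0x00100123: bits 44..63 of addr plus  */
                                /* the valid bit (1 << 20), i.e. the 53rd   */
                                /* bit of the combined 64-bit value         */
#endif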
6941
6942 #ifdef BCM_CNIC
6943 #define CNIC_ILT_LINES          127
6944 #define CNIC_CTX_PER_ILT        16
6945 #else
6946 #define CNIC_ILT_LINES          0
6947 #endif
6948
6949 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6950 {
6951         int reg;
6952
6953         if (CHIP_IS_E1H(bp))
6954                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6955         else /* E1 */
6956                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6957
6958         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6959 }
6960
6961 static int bnx2x_init_func(struct bnx2x *bp)
6962 {
6963         int port = BP_PORT(bp);
6964         int func = BP_FUNC(bp);
6965         u32 addr, val;
6966         int i;
6967
6968         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6969
6970         /* set MSI reconfigure capability */
6971         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6972         val = REG_RD(bp, addr);
6973         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6974         REG_WR(bp, addr, val);
6975
6976         i = FUNC_ILT_BASE(func);
6977
6978         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6979         if (CHIP_IS_E1H(bp)) {
6980                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6981                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6982         } else /* E1 */
6983                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6984                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6985
6986 #ifdef BCM_CNIC
6987         i += 1 + CNIC_ILT_LINES;
6988         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6989         if (CHIP_IS_E1(bp))
6990                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6991         else {
6992                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6993                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6994         }
6995
6996         i++;
6997         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6998         if (CHIP_IS_E1(bp))
6999                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7000         else {
7001                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7002                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7003         }
7004
7005         i++;
7006         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7007         if (CHIP_IS_E1(bp))
7008                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7009         else {
7010                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7011                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7012         }
7013
7014         /* tell the searcher where the T2 table is */
7015         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7016
7017         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7018                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7019
7020         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7021                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7022                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7023
7024         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7025 #endif
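        /* Summary of the per-function ILT layout programmed above when
         * BCM_CNIC is set (derived from the code; illustrative):
         *   FUNC_ILT_BASE(func) + 0..127   CDU contexts (CNIC_ILT_LINES)
         *   FUNC_ILT_BASE(func) + 128      timers
         *   FUNC_ILT_BASE(func) + 129      QM
         *   FUNC_ILT_BASE(func) + 130      searcher T1
         * The searcher T2 table is handed over by physical address via the
         * SRC_REG_*FREE0 registers and is never entered into the ILT.
         */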
7026
7027         if (CHIP_IS_E1H(bp)) {
7028                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7029                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7030                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7031                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7032                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7033                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7034                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7035                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7036                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7037
7038                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7039                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7040         }
7041
7042         /* HC init per function */
7043         if (CHIP_IS_E1H(bp)) {
7044                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7045
7046                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7047                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7048         }
7049         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7050
7051         /* Reset PCIE errors for debug */
7052         REG_WR(bp, 0x2114, 0xffffffff);
7053         REG_WR(bp, 0x2120, 0xffffffff);
7054
7055         return 0;
7056 }
7057
7058 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7059 {
7060         int i, rc = 0;
7061
7062         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7063            BP_FUNC(bp), load_code);
7064
7065         bp->dmae_ready = 0;
7066         mutex_init(&bp->dmae_mutex);
7067         rc = bnx2x_gunzip_init(bp);
7068         if (rc)
7069                 return rc;
7070
7071         switch (load_code) {
7072         case FW_MSG_CODE_DRV_LOAD_COMMON:
7073                 rc = bnx2x_init_common(bp);
7074                 if (rc)
7075                         goto init_hw_err;
7076                 /* no break */
7077
7078         case FW_MSG_CODE_DRV_LOAD_PORT:
7079                 bp->dmae_ready = 1;
7080                 rc = bnx2x_init_port(bp);
7081                 if (rc)
7082                         goto init_hw_err;
7083                 /* no break */
7084
7085         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7086                 bp->dmae_ready = 1;
7087                 rc = bnx2x_init_func(bp);
7088                 if (rc)
7089                         goto init_hw_err;
7090                 break;
7091
7092         default:
7093                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7094                 break;
7095         }
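        /* Note that the deliberate fall-through above makes the stages
         * cumulative: LOAD_COMMON runs common + port + function init,
         * LOAD_PORT runs port + function init, and LOAD_FUNCTION runs the
         * function init alone. */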
7096
7097         if (!BP_NOMCP(bp)) {
7098                 int func = BP_FUNC(bp);
7099
7100                 bp->fw_drv_pulse_wr_seq =
7101                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7102                                  DRV_PULSE_SEQ_MASK);
7103                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7104         }
7105
7106         /* this needs to be done before gunzip end */
7107         bnx2x_zero_def_sb(bp);
7108         for_each_queue(bp, i)
7109                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7110 #ifdef BCM_CNIC
7111         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7112 #endif
7113
7114 init_hw_err:
7115         bnx2x_gunzip_end(bp);
7116
7117         return rc;
7118 }
7119
7120 static void bnx2x_free_mem(struct bnx2x *bp)
7121 {
7122
7123 #define BNX2X_PCI_FREE(x, y, size) \
7124         do { \
7125                 if (x) { \
7126                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7127                         x = NULL; \
7128                         y = 0; \
7129                 } \
7130         } while (0)
7131
7132 #define BNX2X_FREE(x) \
7133         do { \
7134                 if (x) { \
7135                         vfree(x); \
7136                         x = NULL; \
7137                 } \
7138         } while (0)
7139
7140         int i;
7141
7142         /* fastpath */
7143         /* Common */
7144         for_each_queue(bp, i) {
7145
7146                 /* status blocks */
7147                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7148                                bnx2x_fp(bp, i, status_blk_mapping),
7149                                sizeof(struct host_status_block));
7150         }
7151         /* Rx */
7152         for_each_queue(bp, i) {
7153
7154                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7155                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7156                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7157                                bnx2x_fp(bp, i, rx_desc_mapping),
7158                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7159
7160                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7161                                bnx2x_fp(bp, i, rx_comp_mapping),
7162                                sizeof(struct eth_fast_path_rx_cqe) *
7163                                NUM_RCQ_BD);
7164
7165                 /* SGE ring */
7166                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7167                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7168                                bnx2x_fp(bp, i, rx_sge_mapping),
7169                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7170         }
7171         /* Tx */
7172         for_each_queue(bp, i) {
7173
7174                 /* fastpath tx rings: tx_buf tx_desc */
7175                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7176                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7177                                bnx2x_fp(bp, i, tx_desc_mapping),
7178                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7179         }
7180         /* end of fastpath */
7181
7182         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7183                        sizeof(struct host_def_status_block));
7184
7185         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7186                        sizeof(struct bnx2x_slowpath));
7187
7188 #ifdef BCM_CNIC
7189         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7190         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7191         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7192         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7193         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7194                        sizeof(struct host_status_block));
7195 #endif
7196         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7197
7198 #undef BNX2X_PCI_FREE
7199 #undef BNX2X_FREE
7200 }
7201
7202 static int bnx2x_alloc_mem(struct bnx2x *bp)
7203 {
7204
7205 #define BNX2X_PCI_ALLOC(x, y, size) \
7206         do { \
7207                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7208                 if (x == NULL) \
7209                         goto alloc_mem_err; \
7210                 memset(x, 0, size); \
7211         } while (0)
7212
7213 #define BNX2X_ALLOC(x, size) \
7214         do { \
7215                 x = vmalloc(size); \
7216                 if (x == NULL) \
7217                         goto alloc_mem_err; \
7218                 memset(x, 0, size); \
7219         } while (0)
7220
7221         int i;
7222
7223         /* fastpath */
7224         /* Common */
7225         for_each_queue(bp, i) {
7226                 bnx2x_fp(bp, i, bp) = bp;
7227
7228                 /* status blocks */
7229                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7230                                 &bnx2x_fp(bp, i, status_blk_mapping),
7231                                 sizeof(struct host_status_block));
7232         }
7233         /* Rx */
7234         for_each_queue(bp, i) {
7235
7236                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7237                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7238                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7239                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7240                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7241                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7242
7243                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7244                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7245                                 sizeof(struct eth_fast_path_rx_cqe) *
7246                                 NUM_RCQ_BD);
7247
7248                 /* SGE ring */
7249                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7250                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7251                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7252                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7253                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7254         }
7255         /* Tx */
7256         for_each_queue(bp, i) {
7257
7258                 /* fastpath tx rings: tx_buf tx_desc */
7259                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7260                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7261                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7262                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7263                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7264         }
7265         /* end of fastpath */
7266
7267         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7268                         sizeof(struct host_def_status_block));
7269
7270         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7271                         sizeof(struct bnx2x_slowpath));
7272
7273 #ifdef BCM_CNIC
7274         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7275
7276         /* allocate the searcher T2 table:
7277            a quarter of the T1 allocation is used for T2
7278            (which is not entered into the ILT) */
7279         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7280
7281         /* Initialize T2 (for 1024 connections) */
7282         for (i = 0; i < 16*1024; i += 64)
7283                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
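        /* The loop above strings the 64-byte T2 entries into a free list
         * for the searcher: the last 8 bytes of each entry (offset 56)
         * hold the physical address of the next one, so entry 0 points to
         * t2_mapping + 64, entry 1 to t2_mapping + 128, and the last entry
         * points one past the table (64 bytes beyond the SRC_REG_LASTFREE0
         * address programmed in bnx2x_init_func()). */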
7284
7285         /* Timer block array (8*MAX_CONN), phys uncached; for now 1024 conns */
7286         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7287
7288         /* QM queues (128*MAX_CONN) */
7289         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7290
7291         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7292                         sizeof(struct host_status_block));
7293 #endif
7294
7295         /* Slow path ring */
7296         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7297
7298         return 0;
7299
7300 alloc_mem_err:
7301         bnx2x_free_mem(bp);
7302         return -ENOMEM;
7303
7304 #undef BNX2X_PCI_ALLOC
7305 #undef BNX2X_ALLOC
7306 }
7307
7308 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7309 {
7310         int i;
7311
7312         for_each_queue(bp, i) {
7313                 struct bnx2x_fastpath *fp = &bp->fp[i];
7314
7315                 u16 bd_cons = fp->tx_bd_cons;
7316                 u16 sw_prod = fp->tx_pkt_prod;
7317                 u16 sw_cons = fp->tx_pkt_cons;
7318
7319                 while (sw_cons != sw_prod) {
7320                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7321                         sw_cons++;
7322                 }
7323         }
7324 }
7325
7326 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7327 {
7328         int i, j;
7329
7330         for_each_queue(bp, j) {
7331                 struct bnx2x_fastpath *fp = &bp->fp[j];
7332
7333                 for (i = 0; i < NUM_RX_BD; i++) {
7334                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7335                         struct sk_buff *skb = rx_buf->skb;
7336
7337                         if (skb == NULL)
7338                                 continue;
7339
7340                         dma_unmap_single(&bp->pdev->dev,
7341                                          dma_unmap_addr(rx_buf, mapping),
7342                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7343
7344                         rx_buf->skb = NULL;
7345                         dev_kfree_skb(skb);
7346                 }
7347                 if (!fp->disable_tpa)
7348                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7349                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7350                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7351         }
7352 }
7353
7354 static void bnx2x_free_skbs(struct bnx2x *bp)
7355 {
7356         bnx2x_free_tx_skbs(bp);
7357         bnx2x_free_rx_skbs(bp);
7358 }
7359
7360 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7361 {
7362         int i, offset = 1;
7363
7364         free_irq(bp->msix_table[0].vector, bp->dev);
7365         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7366            bp->msix_table[0].vector);
7367
7368 #ifdef BCM_CNIC
7369         offset++;
7370 #endif
7371         for_each_queue(bp, i) {
7372                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7373                    "state %x\n", i, bp->msix_table[i + offset].vector,
7374                    bnx2x_fp(bp, i, state));
7375
7376                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7377         }
7378 }
7379
7380 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7381 {
7382         if (bp->flags & USING_MSIX_FLAG) {
7383                 if (!disable_only)
7384                         bnx2x_free_msix_irqs(bp);
7385                 pci_disable_msix(bp->pdev);
7386                 bp->flags &= ~USING_MSIX_FLAG;
7387
7388         } else if (bp->flags & USING_MSI_FLAG) {
7389                 if (!disable_only)
7390                         free_irq(bp->pdev->irq, bp->dev);
7391                 pci_disable_msi(bp->pdev);
7392                 bp->flags &= ~USING_MSI_FLAG;
7393
7394         } else if (!disable_only)
7395                 free_irq(bp->pdev->irq, bp->dev);
7396 }
7397
7398 static int bnx2x_enable_msix(struct bnx2x *bp)
7399 {
7400         int i, rc, offset = 1;
7401         int igu_vec = 0;
7402
7403         bp->msix_table[0].entry = igu_vec;
7404         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7405
7406 #ifdef BCM_CNIC
7407         igu_vec = BP_L_ID(bp) + offset;
7408         bp->msix_table[1].entry = igu_vec;
7409         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7410         offset++;
7411 #endif
7412         for_each_queue(bp, i) {
7413                 igu_vec = BP_L_ID(bp) + offset + i;
7414                 bp->msix_table[i + offset].entry = igu_vec;
7415                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7416                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7417         }
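        /* Worked example (illustrative, assuming BCM_CNIC is set,
         * BP_L_ID(bp) == 0 and four queues): the table built above is
         *   msix_table[0].entry = 0        (slowpath)
         *   msix_table[1].entry = 1        (CNIC)
         *   msix_table[2..5].entry = 2..5  (fastpath #0..#3)
         * so BNX2X_NUM_QUEUES(bp) + offset == 6 vectors are requested
         * below. */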
7418
7419         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7420                              BNX2X_NUM_QUEUES(bp) + offset);
7421
7422         /*
7423          * reconfigure number of tx/rx queues according to available
7424          * MSI-X vectors
7425          */
7426         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7427                 /* vectors available for FP */
7428                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7429
7430                 DP(NETIF_MSG_IFUP,
7431                    "Trying to use less MSI-X vectors: %d\n", rc);
7432
7433                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7434
7435                 if (rc) {
7436                         DP(NETIF_MSG_IFUP,
7437                            "MSI-X is not attainable  rc %d\n", rc);
7438                         return rc;
7439                 }
7440
7441                 bp->num_queues = min(bp->num_queues, fp_vec);
7442
7443                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7444                                   bp->num_queues);
7445         } else if (rc) {
7446                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7447                 return rc;
7448         }
7449
7450         bp->flags |= USING_MSIX_FLAG;
7451
7452         return 0;
7453 }
7454
7455 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7456 {
7457         int i, rc, offset = 1;
7458
7459         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7460                          bp->dev->name, bp->dev);
7461         if (rc) {
7462                 BNX2X_ERR("request sp irq failed\n");
7463                 return -EBUSY;
7464         }
7465
7466 #ifdef BCM_CNIC
7467         offset++;
7468 #endif
7469         for_each_queue(bp, i) {
7470                 struct bnx2x_fastpath *fp = &bp->fp[i];
7471                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7472                          bp->dev->name, i);
7473
7474                 rc = request_irq(bp->msix_table[i + offset].vector,
7475                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7476                 if (rc) {
7477                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7478                         bnx2x_free_msix_irqs(bp);
7479                         return -EBUSY;
7480                 }
7481
7482                 fp->state = BNX2X_FP_STATE_IRQ;
7483         }
7484
7485         i = BNX2X_NUM_QUEUES(bp);
7486         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7487                " ... fp[%d] %d\n",
7488                bp->msix_table[0].vector,
7489                0, bp->msix_table[offset].vector,
7490                i - 1, bp->msix_table[offset + i - 1].vector);
7491
7492         return 0;
7493 }
7494
7495 static int bnx2x_enable_msi(struct bnx2x *bp)
7496 {
7497         int rc;
7498
7499         rc = pci_enable_msi(bp->pdev);
7500         if (rc) {
7501                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7502                 return -1;
7503         }
7504         bp->flags |= USING_MSI_FLAG;
7505
7506         return 0;
7507 }
7508
7509 static int bnx2x_req_irq(struct bnx2x *bp)
7510 {
7511         unsigned long flags;
7512         int rc;
7513
7514         if (bp->flags & USING_MSI_FLAG)
7515                 flags = 0;
7516         else
7517                 flags = IRQF_SHARED;
7518
7519         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7520                          bp->dev->name, bp->dev);
7521         if (!rc)
7522                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7523
7524         return rc;
7525 }
7526
7527 static void bnx2x_napi_enable(struct bnx2x *bp)
7528 {
7529         int i;
7530
7531         for_each_queue(bp, i)
7532                 napi_enable(&bnx2x_fp(bp, i, napi));
7533 }
7534
7535 static void bnx2x_napi_disable(struct bnx2x *bp)
7536 {
7537         int i;
7538
7539         for_each_queue(bp, i)
7540                 napi_disable(&bnx2x_fp(bp, i, napi));
7541 }
7542
7543 static void bnx2x_netif_start(struct bnx2x *bp)
7544 {
7545         int intr_sem;
7546
7547         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7548         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7549
7550         if (intr_sem) {
7551                 if (netif_running(bp->dev)) {
7552                         bnx2x_napi_enable(bp);
7553                         bnx2x_int_enable(bp);
7554                         if (bp->state == BNX2X_STATE_OPEN)
7555                                 netif_tx_wake_all_queues(bp->dev);
7556                 }
7557         }
7558 }
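/* In bnx2x_netif_start() above, bp->intr_sem acts as a disable count:
 * atomic_dec_and_test() returns true only when the decrement reaches zero,
 * so NAPI, interrupts and the Tx queues are restarted by the last enabler
 * alone. */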
7559
7560 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7561 {
7562         bnx2x_int_disable_sync(bp, disable_hw);
7563         bnx2x_napi_disable(bp);
7564         netif_tx_disable(bp->dev);
7565 }
7566
7567 /*
7568  * Init service functions
7569  */
7570
7571 /**
7572  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7573  *
7574  * @param bp driver descriptor
7575  * @param set set or clear an entry (1 or 0)
7576  * @param mac pointer to a buffer containing a MAC
7577  * @param cl_bit_vec bit vector of clients to register a MAC for
7578  * @param cam_offset offset in a CAM to use
7579  * @param with_bcast set broadcast MAC as well
7580  */
7581 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7582                                       u32 cl_bit_vec, u8 cam_offset,
7583                                       u8 with_bcast)
7584 {
7585         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7586         int port = BP_PORT(bp);
7587
7588         /* CAM allocation
7589          * unicasts 0-31:port0 32-63:port1
7590          * multicast 64-127:port0 128-191:port1
7591          */
7592         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7593         config->hdr.offset = cam_offset;
7594         config->hdr.client_id = 0xff;
7595         config->hdr.reserved1 = 0;
7596
7597         /* primary MAC */
7598         config->config_table[0].cam_entry.msb_mac_addr =
7599                                         swab16(*(u16 *)&mac[0]);
7600         config->config_table[0].cam_entry.middle_mac_addr =
7601                                         swab16(*(u16 *)&mac[2]);
7602         config->config_table[0].cam_entry.lsb_mac_addr =
7603                                         swab16(*(u16 *)&mac[4]);
7604         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7605         if (set)
7606                 config->config_table[0].target_table_entry.flags = 0;
7607         else
7608                 CAM_INVALIDATE(config->config_table[0]);
7609         config->config_table[0].target_table_entry.clients_bit_vector =
7610                                                 cpu_to_le32(cl_bit_vec);
7611         config->config_table[0].target_table_entry.vlan_id = 0;
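        /* Byte-order example (illustrative, assuming a little-endian
         * host): for MAC 00:11:22:33:44:55, *(u16 *)&mac[0] reads 0x1100,
         * so swab16() yields msb = 0x0011, middle = 0x2233, lsb = 0x4455,
         * i.e. the CAM fields hold the address bytes in wire order. */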
7612
7613         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7614            (set ? "setting" : "clearing"),
7615            config->config_table[0].cam_entry.msb_mac_addr,
7616            config->config_table[0].cam_entry.middle_mac_addr,
7617            config->config_table[0].cam_entry.lsb_mac_addr);
7618
7619         /* broadcast */
7620         if (with_bcast) {
7621                 config->config_table[1].cam_entry.msb_mac_addr =
7622                         cpu_to_le16(0xffff);
7623                 config->config_table[1].cam_entry.middle_mac_addr =
7624                         cpu_to_le16(0xffff);
7625                 config->config_table[1].cam_entry.lsb_mac_addr =
7626                         cpu_to_le16(0xffff);
7627                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7628                 if (set)
7629                         config->config_table[1].target_table_entry.flags =
7630                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7631                 else
7632                         CAM_INVALIDATE(config->config_table[1]);
7633                 config->config_table[1].target_table_entry.clients_bit_vector =
7634                                                         cpu_to_le32(cl_bit_vec);
7635                 config->config_table[1].target_table_entry.vlan_id = 0;
7636         }
7637
7638         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7639                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7640                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7641 }
7642
7643 /**
7644  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7645  *
7646  * @param bp driver descriptor
7647  * @param set set or clear an entry (1 or 0)
7648  * @param mac pointer to a buffer containing a MAC
7649  * @param cl_bit_vec bit vector of clients to register a MAC for
7650  * @param cam_offset offset in a CAM to use
7651  */
7652 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7653                                        u32 cl_bit_vec, u8 cam_offset)
7654 {
7655         struct mac_configuration_cmd_e1h *config =
7656                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7657
7658         config->hdr.length = 1;
7659         config->hdr.offset = cam_offset;
7660         config->hdr.client_id = 0xff;
7661         config->hdr.reserved1 = 0;
7662
7663         /* primary MAC */
7664         config->config_table[0].msb_mac_addr =
7665                                         swab16(*(u16 *)&mac[0]);
7666         config->config_table[0].middle_mac_addr =
7667                                         swab16(*(u16 *)&mac[2]);
7668         config->config_table[0].lsb_mac_addr =
7669                                         swab16(*(u16 *)&mac[4]);
7670         config->config_table[0].clients_bit_vector =
7671                                         cpu_to_le32(cl_bit_vec);
7672         config->config_table[0].vlan_id = 0;
7673         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7674         if (set)
7675                 config->config_table[0].flags = BP_PORT(bp);
7676         else
7677                 config->config_table[0].flags =
7678                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7679
7680         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7681            (set ? "setting" : "clearing"),
7682            config->config_table[0].msb_mac_addr,
7683            config->config_table[0].middle_mac_addr,
7684            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7685
7686         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7687                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7688                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7689 }
7690
7691 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7692                              int *state_p, int poll)
7693 {
7694         /* can take a while if any port is running */
7695         int cnt = 5000;
7696
7697         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7698            poll ? "polling" : "waiting", state, idx);
7699
7700         might_sleep();
7701         while (cnt--) {
7702                 if (poll) {
7703                         bnx2x_rx_int(bp->fp, 10);
7704                         /* if index is different from 0
7705                          * the reply for some commands will
7706                          * be on the non default queue
7707                          */
7708                         if (idx)
7709                                 bnx2x_rx_int(&bp->fp[idx], 10);
7710                 }
7711
7712                 mb(); /* state is changed by bnx2x_sp_event() */
7713                 if (*state_p == state) {
7714 #ifdef BNX2X_STOP_ON_ERROR
7715                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7716 #endif
7717                         return 0;
7718                 }
7719
7720                 msleep(1);
7721
7722                 if (bp->panic)
7723                         return -EIO;
7724         }
7725
7726         /* timeout! */
7727         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7728                   poll ? "polling" : "waiting", state, idx);
7729 #ifdef BNX2X_STOP_ON_ERROR
7730         bnx2x_panic();
7731 #endif
7732
7733         return -EBUSY;
7734 }
7735
7736 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7737 {
7738         bp->set_mac_pending++;
7739         smp_wmb();
7740
7741         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7742                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7743
7744         /* Wait for a completion */
7745         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7746 }
7747
7748 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7749 {
7750         bp->set_mac_pending++;
7751         smp_wmb();
7752
7753         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7754                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7755                                   1);
7756
7757         /* Wait for a completion */
7758         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7759 }
7760
7761 #ifdef BCM_CNIC
7762 /**
7763  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7764  * MAC(s). This function will wait until the ramdord completion
7765  * returns.
7766  *
7767  * @param bp driver handle
7768  * @param set set or clear the CAM entry
7769  *
7770  * @return 0 if success, -ENODEV if the ramrod doesn't return.
7771  */
7772 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7773 {
7774         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7775
7776         bp->set_mac_pending++;
7777         smp_wmb();
7778
7779         /* Send a SET_MAC ramrod */
7780         if (CHIP_IS_E1(bp))
7781                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7782                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7783                                   1);
7784         else
7785                 /* CAM allocation for E1H
7786                 * unicasts: by func number
7787                 * multicast: 20+FUNC*20, 20 each
7788                 */
7789                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7790                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7791
7792         /* Wait for a completion when setting */
7793         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7794
7795         return 0;
7796 }
7797 #endif
7798
7799 static int bnx2x_setup_leading(struct bnx2x *bp)
7800 {
7801         int rc;
7802
7803         /* reset IGU state */
7804         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7805
7806         /* SETUP ramrod */
7807         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7808
7809         /* Wait for completion */
7810         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7811
7812         return rc;
7813 }
7814
7815 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7816 {
7817         struct bnx2x_fastpath *fp = &bp->fp[index];
7818
7819         /* reset IGU state */
7820         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7821
7822         /* SETUP ramrod */
7823         fp->state = BNX2X_FP_STATE_OPENING;
7824         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7825                       fp->cl_id, 0);
7826
7827         /* Wait for completion */
7828         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7829                                  &(fp->state), 0);
7830 }
7831
7832 static int bnx2x_poll(struct napi_struct *napi, int budget);
7833
7834 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7835 {
7836
7837         switch (bp->multi_mode) {
7838         case ETH_RSS_MODE_DISABLED:
7839                 bp->num_queues = 1;
7840                 break;
7841
7842         case ETH_RSS_MODE_REGULAR:
7843                 if (num_queues)
7844                         bp->num_queues = min_t(u32, num_queues,
7845                                                   BNX2X_MAX_QUEUES(bp));
7846                 else
7847                         bp->num_queues = min_t(u32, num_online_cpus(),
7848                                                   BNX2X_MAX_QUEUES(bp));
7849                 break;
7850
7851
7852         default:
7853                 bp->num_queues = 1;
7854                 break;
7855         }
7856 }
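/* Example (illustrative values): with multi_mode=1 (ETH_RSS_MODE_REGULAR),
 * the num_queues module parameter left at 0 and 16 online CPUs on a board
 * where BNX2X_MAX_QUEUES(bp) is 8, this resolves to bp->num_queues = 8;
 * loading with num_queues=4 would cap it at 4 instead. */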
7857
7858 static int bnx2x_set_num_queues(struct bnx2x *bp)
7859 {
7860         int rc = 0;
7861
7862         switch (int_mode) {
7863         case INT_MODE_INTx:
7864         case INT_MODE_MSI:
7865                 bp->num_queues = 1;
7866                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7867                 break;
7868         default:
7869                 /* Set number of queues according to bp->multi_mode value */
7870                 bnx2x_set_num_queues_msix(bp);
7871
7872                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7873                    bp->num_queues);
7874
7875                 /* if we can't use MSI-X we only need one fp,
7876                  * so try to enable MSI-X with the requested number of fp's
7877                  * and fallback to MSI or legacy INTx with one fp
7878                  */
7879                 rc = bnx2x_enable_msix(bp);
7880                 if (rc)
7881                         /* failed to enable MSI-X */
7882                         bp->num_queues = 1;
7883                 break;
7884         }
7885         bp->dev->real_num_tx_queues = bp->num_queues;
7886         return rc;
7887 }
7888
7889 #ifdef BCM_CNIC
7890 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7891 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7892 #endif
7893
7894 /* must be called with rtnl_lock */
7895 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7896 {
7897         u32 load_code;
7898         int i, rc;
7899
7900 #ifdef BNX2X_STOP_ON_ERROR
7901         if (unlikely(bp->panic))
7902                 return -EPERM;
7903 #endif
7904
7905         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7906
7907         rc = bnx2x_set_num_queues(bp);
7908
7909         if (bnx2x_alloc_mem(bp)) {
7910                 bnx2x_free_irq(bp, true);
7911                 return -ENOMEM;
7912         }
7913
7914         for_each_queue(bp, i)
7915                 bnx2x_fp(bp, i, disable_tpa) =
7916                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7917
7918         for_each_queue(bp, i)
7919                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7920                                bnx2x_poll, 128);
7921
7922         bnx2x_napi_enable(bp);
7923
7924         if (bp->flags & USING_MSIX_FLAG) {
7925                 rc = bnx2x_req_msix_irqs(bp);
7926                 if (rc) {
7927                         bnx2x_free_irq(bp, true);
7928                         goto load_error1;
7929                 }
7930         } else {
7931                 /* Fall back to INTx if enabling MSI-X failed due to lack
7932                    of memory (in bnx2x_set_num_queues()) */
7933                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7934                         bnx2x_enable_msi(bp);
7935                 bnx2x_ack_int(bp);
7936                 rc = bnx2x_req_irq(bp);
7937                 if (rc) {
7938                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7939                         bnx2x_free_irq(bp, true);
7940                         goto load_error1;
7941                 }
7942                 if (bp->flags & USING_MSI_FLAG) {
7943                         bp->dev->irq = bp->pdev->irq;
7944                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7945                                     bp->pdev->irq);
7946                 }
7947         }
7948
7949         /* Send LOAD_REQUEST command to MCP.
7950            The MCP replies with the type of LOAD command:
7951            if this is the first port to be initialized,
7952            common blocks should be initialized as well; otherwise not.
7953         */
7954         if (!BP_NOMCP(bp)) {
7955                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7956                 if (!load_code) {
7957                         BNX2X_ERR("MCP response failure, aborting\n");
7958                         rc = -EBUSY;
7959                         goto load_error2;
7960                 }
7961                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7962                         rc = -EBUSY; /* other port in diagnostic mode */
7963                         goto load_error2;
7964                 }
7965
7966         } else {
7967                 int port = BP_PORT(bp);
7968
7969                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7970                    load_count[0], load_count[1], load_count[2]);
7971                 load_count[0]++;
7972                 load_count[1 + port]++;
7973                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7974                    load_count[0], load_count[1], load_count[2]);
7975                 if (load_count[0] == 1)
7976                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7977                 else if (load_count[1 + port] == 1)
7978                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7979                 else
7980                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7981         }
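        /* In the NO-MCP path above, load_count[0] counts loaded functions
         * chip-wide and load_count[1 + port] those on this port: the first
         * function up overall gets LOAD_COMMON, the first on its port gets
         * LOAD_PORT and every later one gets LOAD_FUNCTION, mirroring the
         * answer the MCP would have given. */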
7982
7983         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7984             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7985                 bp->port.pmf = 1;
7986         else
7987                 bp->port.pmf = 0;
7988         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7989
7990         /* Initialize HW */
7991         rc = bnx2x_init_hw(bp, load_code);
7992         if (rc) {
7993                 BNX2X_ERR("HW init failed, aborting\n");
7994                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7995                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7996                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7997                 goto load_error2;
7998         }
7999
8000         /* Setup NIC internals and enable interrupts */
8001         bnx2x_nic_init(bp, load_code);
8002
8003         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8004             (bp->common.shmem2_base))
8005                 SHMEM2_WR(bp, dcc_support,
8006                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8007                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8008
8009         /* Send LOAD_DONE command to MCP */
8010         if (!BP_NOMCP(bp)) {
8011                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8012                 if (!load_code) {
8013                         BNX2X_ERR("MCP response failure, aborting\n");
8014                         rc = -EBUSY;
8015                         goto load_error3;
8016                 }
8017         }
8018
8019         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8020
8021         rc = bnx2x_setup_leading(bp);
8022         if (rc) {
8023                 BNX2X_ERR("Setup leading failed!\n");
8024 #ifndef BNX2X_STOP_ON_ERROR
8025                 goto load_error3;
8026 #else
8027                 bp->panic = 1;
8028                 return -EBUSY;
8029 #endif
8030         }
8031
8032         if (CHIP_IS_E1H(bp))
8033                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8034                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8035                         bp->flags |= MF_FUNC_DIS;
8036                 }
8037
8038         if (bp->state == BNX2X_STATE_OPEN) {
8039 #ifdef BCM_CNIC
8040                 /* Enable Timer scan */
8041                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8042 #endif
8043                 for_each_nondefault_queue(bp, i) {
8044                         rc = bnx2x_setup_multi(bp, i);
8045                         if (rc)
8046 #ifdef BCM_CNIC
8047                                 goto load_error4;
8048 #else
8049                                 goto load_error3;
8050 #endif
8051                 }
8052
8053                 if (CHIP_IS_E1(bp))
8054                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8055                 else
8056                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8057 #ifdef BCM_CNIC
8058                 /* Set iSCSI L2 MAC */
8059                 mutex_lock(&bp->cnic_mutex);
8060                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8061                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8062                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8063                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8064                                       CNIC_SB_ID(bp));
8065                 }
8066                 mutex_unlock(&bp->cnic_mutex);
8067 #endif
8068         }
8069
8070         if (bp->port.pmf)
8071                 bnx2x_initial_phy_init(bp, load_mode);
8072
8073         /* Start fast path */
8074         switch (load_mode) {
8075         case LOAD_NORMAL:
8076                 if (bp->state == BNX2X_STATE_OPEN) {
8077                         /* Tx queues should only be re-enabled */
8078                         netif_tx_wake_all_queues(bp->dev);
8079                 }
8080                 /* Initialize the receive filter. */
8081                 bnx2x_set_rx_mode(bp->dev);
8082                 break;
8083
8084         case LOAD_OPEN:
8085                 netif_tx_start_all_queues(bp->dev);
8086                 if (bp->state != BNX2X_STATE_OPEN)
8087                         netif_tx_disable(bp->dev);
8088                 /* Initialize the receive filter. */
8089                 bnx2x_set_rx_mode(bp->dev);
8090                 break;
8091
8092         case LOAD_DIAG:
8093                 /* Initialize the receive filter. */
8094                 bnx2x_set_rx_mode(bp->dev);
8095                 bp->state = BNX2X_STATE_DIAG;
8096                 break;
8097
8098         default:
8099                 break;
8100         }
8101
8102         if (!bp->port.pmf)
8103                 bnx2x__link_status_update(bp);
8104
8105         /* start the timer */
8106         mod_timer(&bp->timer, jiffies + bp->current_interval);
8107
8108 #ifdef BCM_CNIC
8109         bnx2x_setup_cnic_irq_info(bp);
8110         if (bp->state == BNX2X_STATE_OPEN)
8111                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8112 #endif
8113         bnx2x_inc_load_cnt(bp);
8114
8115         return 0;
8116
8117 #ifdef BCM_CNIC
8118 load_error4:
8119         /* Disable Timer scan */
8120         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8121 #endif
8122 load_error3:
8123         bnx2x_int_disable_sync(bp, 1);
8124         if (!BP_NOMCP(bp)) {
8125                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8126                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8127         }
8128         bp->port.pmf = 0;
8129         /* Free SKBs, SGEs, TPA pool and driver internals */
8130         bnx2x_free_skbs(bp);
8131         for_each_queue(bp, i)
8132                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8133 load_error2:
8134         /* Release IRQs */
8135         bnx2x_free_irq(bp, false);
8136 load_error1:
8137         bnx2x_napi_disable(bp);
8138         for_each_queue(bp, i)
8139                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8140         bnx2x_free_mem(bp);
8141
8142         return rc;
8143 }
8144
8145 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8146 {
8147         struct bnx2x_fastpath *fp = &bp->fp[index];
8148         int rc;
8149
8150         /* halt the connection */
8151         fp->state = BNX2X_FP_STATE_HALTING;
8152         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8153
8154         /* Wait for completion */
8155         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8156                                &(fp->state), 1);
8157         if (rc) /* timeout */
8158                 return rc;
8159
8160         /* delete cfc entry */
8161         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8162
8163         /* Wait for completion */
8164         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8165                                &(fp->state), 1);
8166         return rc;
8167 }
8168
8169 static int bnx2x_stop_leading(struct bnx2x *bp)
8170 {
8171         __le16 dsb_sp_prod_idx;
8172         /* if the other port is handling traffic,
8173            this can take a lot of time */
8174         int cnt = 500;
8175         int rc;
8176
8177         might_sleep();
8178
8179         /* Send HALT ramrod */
8180         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8181         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8182
8183         /* Wait for completion */
8184         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8185                                &(bp->fp[0].state), 1);
8186         if (rc) /* timeout */
8187                 return rc;
8188
8189         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8190
8191         /* Send PORT_DELETE ramrod */
8192         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8193
8194         /* Wait for completion to arrive on the default status block.
8195            We are going to reset the chip anyway, so there is not
8196            much to do if this times out.
8197          */
8198         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8199                 if (!cnt) {
8200                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8201                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8202                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8203 #ifdef BNX2X_STOP_ON_ERROR
8204                         bnx2x_panic();
8205 #endif
8206                         rc = -EBUSY;
8207                         break;
8208                 }
8209                 cnt--;
8210                 msleep(1);
8211                 rmb(); /* Refresh the dsb_sp_prod */
8212         }
8213         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8214         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8215
8216         return rc;
8217 }
8218
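/* The reset helpers below implement the three unload scopes reported by
 * the MCP: UNLOAD_FUNCTION resets only per-function resources (IGU edges,
 * timers scan, ILT), UNLOAD_PORT also resets the port (NIG/BRB/AEU), and
 * UNLOAD_COMMON resets the common blocks as well (see bnx2x_reset_chip()).
 */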
8219 static void bnx2x_reset_func(struct bnx2x *bp)
8220 {
8221         int port = BP_PORT(bp);
8222         int func = BP_FUNC(bp);
8223         int base, i;
8224
8225         /* Configure IGU */
8226         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8227         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8228
8229 #ifdef BCM_CNIC
8230         /* Disable Timer scan */
8231         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8232         /*
8233          * Wait for at least 10 ms and up to 2 seconds for the timers scan
8234          * to complete
8235          */
8236         for (i = 0; i < 200; i++) {
8237                 msleep(10);
8238                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8239                         break;
8240         }
8241 #endif
8242         /* Clear ILT */
8243         base = FUNC_ILT_BASE(func);
8244         for (i = base; i < base + ILT_PER_FUNC; i++)
8245                 bnx2x_ilt_wr(bp, i, 0);
8246 }
8247
8248 static void bnx2x_reset_port(struct bnx2x *bp)
8249 {
8250         int port = BP_PORT(bp);
8251         u32 val;
8252
8253         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8254
8255         /* Do not rcv packets to BRB */
8256         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8257         /* Do not direct rcv packets that are not for MCP to the BRB */
8258         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8259                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8260
8261         /* Configure AEU */
8262         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8263
8264         msleep(100);
8265         /* Check for BRB port occupancy */
8266         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8267         if (val)
8268                 DP(NETIF_MSG_IFDOWN,
8269                    "BRB1 is not empty, %d blocks are occupied\n", val);
8270
8271         /* TODO: Close Doorbell port? */
8272 }
8273
8274 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8275 {
8276         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8277            BP_FUNC(bp), reset_code);
8278
8279         switch (reset_code) {
8280         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8281                 bnx2x_reset_port(bp);
8282                 bnx2x_reset_func(bp);
8283                 bnx2x_reset_common(bp);
8284                 break;
8285
8286         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8287                 bnx2x_reset_port(bp);
8288                 bnx2x_reset_func(bp);
8289                 break;
8290
8291         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8292                 bnx2x_reset_func(bp);
8293                 break;
8294
8295         default:
8296                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8297                 break;
8298         }
8299 }
8300
8301 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8302 {
8303         int port = BP_PORT(bp);
8304         u32 reset_code = 0;
8305         int i, cnt, rc;
8306
8307         /* Wait until tx fastpath tasks complete */
8308         for_each_queue(bp, i) {
8309                 struct bnx2x_fastpath *fp = &bp->fp[i];
8310
8311                 cnt = 1000;
8312                 while (bnx2x_has_tx_work_unload(fp)) {
8313
8314                         bnx2x_tx_int(fp);
8315                         if (!cnt) {
8316                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8317                                           i);
8318 #ifdef BNX2X_STOP_ON_ERROR
8319                                 bnx2x_panic();
8320                                 return; /* bnx2x_chip_cleanup() is void */
8321 #else
8322                                 break;
8323 #endif
8324                         }
8325                         cnt--;
8326                         msleep(1);
8327                 }
8328         }
8329         /* Give HW time to discard old tx messages */
8330         msleep(1);
8331
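        /* MAC/multicast cleanup differs per chip: E1 keeps its filters in
         * the CAM, which is invalidated entry by entry via a SET_MAC
         * ramrod, while E1H clears the per-function LLH enable bit and
         * zeroes the MC hash table.
         */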
8332         if (CHIP_IS_E1(bp)) {
8333                 struct mac_configuration_cmd *config =
8334                                                 bnx2x_sp(bp, mcast_config);
8335
8336                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8337
8338                 for (i = 0; i < config->hdr.length; i++)
8339                         CAM_INVALIDATE(config->config_table[i]);
8340
8341                 config->hdr.length = i;
8342                 if (CHIP_REV_IS_SLOW(bp))
8343                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8344                 else
8345                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8346                 config->hdr.client_id = bp->fp->cl_id;
8347                 config->hdr.reserved1 = 0;
8348
8349                 bp->set_mac_pending++;
8350                 smp_wmb();
8351
8352                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8353                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8354                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8355
8356         } else { /* E1H */
8357                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8358
8359                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8360
8361                 for (i = 0; i < MC_HASH_SIZE; i++)
8362                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8363
8364                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8365         }
8366 #ifdef BCM_CNIC
8367         /* Clear iSCSI L2 MAC */
8368         mutex_lock(&bp->cnic_mutex);
8369         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8370                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8371                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8372         }
8373         mutex_unlock(&bp->cnic_mutex);
8374 #endif
8375
8376         if (unload_mode == UNLOAD_NORMAL)
8377                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8378
8379         else if (bp->flags & NO_WOL_FLAG)
8380                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8381
8382         else if (bp->wol) {
8383                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8384                 u8 *mac_addr = bp->dev->dev_addr;
8385                 u32 val;
8386                 /* The MAC address is written to entries 1-4 to
8387                    preserve entry 0, which is used by the PMF */
8388                 u8 entry = (BP_E1HVN(bp) + 1)*8;
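                /* Each MAC-match entry is two 32-bit words (8 bytes):
                 * the first holds address bytes 0-1, the second bytes
                 * 2-5, hence the (vn + 1) * 8 entry offset above.
                 */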
8389
8390                 val = (mac_addr[0] << 8) | mac_addr[1];
8391                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8392
8393                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8394                       (mac_addr[4] << 8) | mac_addr[5];
8395                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8396
8397                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8398
8399         } else
8400                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8401
8402         /* Close multi and leading connections.
8403            Completions for the ramrods are collected in a synchronous way */
8404         for_each_nondefault_queue(bp, i)
8405                 if (bnx2x_stop_multi(bp, i))
8406                         goto unload_error;
8407
8408         rc = bnx2x_stop_leading(bp);
8409         if (rc) {
8410                 BNX2X_ERR("Stop leading failed!\n");
8411 #ifdef BNX2X_STOP_ON_ERROR
8412                 return; /* bnx2x_chip_cleanup() is void */
8413 #else
8414                 goto unload_error;
8415 #endif
8416         }
8417
8418 unload_error:
8419         if (!BP_NOMCP(bp))
8420                 reset_code = bnx2x_fw_command(bp, reset_code);
8421         else {
8422                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8423                    load_count[0], load_count[1], load_count[2]);
8424                 load_count[0]--;
8425                 load_count[1 + port]--;
8426                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8427                    load_count[0], load_count[1], load_count[2]);
8428                 if (load_count[0] == 0)
8429                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8430                 else if (load_count[1 + port] == 0)
8431                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8432                 else
8433                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8434         }
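        /* Note: without an MCP the driver mimics the MCP's bookkeeping -
         * load_count[0] counts loads on the whole device and
         * load_count[1 + port] counts loads per port, so the last
         * function to unload ends up doing the COMMON (or PORT) reset.
         */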
8435
8436         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8437             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8438                 bnx2x__link_reset(bp);
8439
8440         /* Reset the chip */
8441         bnx2x_reset_chip(bp, reset_code);
8442
8443         /* Report UNLOAD_DONE to MCP */
8444         if (!BP_NOMCP(bp))
8445                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8446
8447 }
8448
8449 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8450 {
8451         u32 val;
8452
8453         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8454
8455         if (CHIP_IS_E1(bp)) {
8456                 int port = BP_PORT(bp);
8457                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8458                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8459
8460                 val = REG_RD(bp, addr);
8461                 val &= ~(0x300);
8462                 REG_WR(bp, addr, val);
8463         } else if (CHIP_IS_E1H(bp)) {
8464                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8465                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8466                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8467                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8468         }
8469 }
8470
8471 /* must be called with rtnl_lock */
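/* Unload order: notify CNIC, drop all Rx, stop interrupts/NAPI/Tx and
 * the timer, release IRQs, and only then clean up the chip. In
 * UNLOAD_RECOVERY mode the chip cleanup is skipped, since the recovery
 * leader is about to hard-reset the whole chip anyway.
 */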
8472 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8473 {
8474         int i;
8475
8476         if (bp->state == BNX2X_STATE_CLOSED) {
8477                 /* Interface has been removed - nothing to recover */
8478                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8479                 bp->is_leader = 0;
8480                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8481                 smp_wmb();
8482
8483                 return -EINVAL;
8484         }
8485
8486 #ifdef BCM_CNIC
8487         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8488 #endif
8489         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8490
8491         /* Set "drop all" */
8492         bp->rx_mode = BNX2X_RX_MODE_NONE;
8493         bnx2x_set_storm_rx_mode(bp);
8494
8495         /* Disable HW interrupts, NAPI and Tx */
8496         bnx2x_netif_stop(bp, 1);
8497
8498         del_timer_sync(&bp->timer);
8499         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8500                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8501         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8502
8503         /* Release IRQs */
8504         bnx2x_free_irq(bp, false);
8505
8506         /* Cleanup the chip if needed */
8507         if (unload_mode != UNLOAD_RECOVERY)
8508                 bnx2x_chip_cleanup(bp, unload_mode);
8509
8510         bp->port.pmf = 0;
8511
8512         /* Free SKBs, SGEs, TPA pool and driver internals */
8513         bnx2x_free_skbs(bp);
8514         for_each_queue(bp, i)
8515                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8516         for_each_queue(bp, i)
8517                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8518         bnx2x_free_mem(bp);
8519
8520         bp->state = BNX2X_STATE_CLOSED;
8521
8522         netif_carrier_off(bp->dev);
8523
8524         /* The last driver to unload must disable "close the gates" if
8525          * there is no parity attention or "process kill" pending.
8526          */
8527         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8528             bnx2x_reset_is_done(bp))
8529                 bnx2x_disable_close_the_gate(bp);
8530
8531         /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8532         if (unload_mode == UNLOAD_RECOVERY)
8533                 bp->fw_seq = 0;
8534
8535         return 0;
8536 }
8537
8538 /* Close gates #2, #3 and #4: */
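/* Gate #4 discards doorbells (PXP_REG_HST_DISCARD_DOORBELLS), gate #2
 * discards host internal writes (PXP_REG_HST_DISCARD_INTERNAL_WRITES)
 * and gate #3 toggles bit 0 of HC_REG_CONFIG_0/1; together they isolate
 * the chip from the host during a "process kill".
 */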
8539 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8540 {
8541         u32 val, addr;
8542
8543         /* Gates #2 and #4a are closed/opened for "not E1" only */
8544         if (!CHIP_IS_E1(bp)) {
8545                 /* #4 */
8546                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8547                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8548                        close ? (val | 0x1) : (val & (~(u32)1)));
8549                 /* #2 */
8550                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8551                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8552                        close ? (val | 0x1) : (val & (~(u32)1)));
8553         }
8554
8555         /* #3 */
8556         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8557         val = REG_RD(bp, addr);
8558         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8559
8560         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8561                 close ? "closing" : "opening");
8562         mmiowb();
8563 }
8564
8565 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
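/* The `magic' bit in the CLP mailbox is set before an MCP reset so that
 * the multi-function (MF) configuration is preserved across the reset;
 * its previous value is restored once the MCP is back up.
 */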
8566
8567 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8568 {
8569         /* Do some magic... */
8570         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8571         *magic_val = val & SHARED_MF_CLP_MAGIC;
8572         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8573 }
8574
8575 /* Restore the value of the `magic' bit.
8576  *
8577  * @param bp Driver handle.
8578  * @param magic_val Old value of the `magic' bit.
8579  */
8580 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8581 {
8582         /* Restore the `magic' bit value... */
8586         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8587         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8588                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8589 }
8590
8591 /* Prepares for MCP reset: takes care of CLP configurations.
8592  *
8593  * @param bp
8594  * @param magic_val Old value of 'magic' bit.
8595  */
8596 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8597 {
8598         u32 shmem;
8599         u32 validity_offset;
8600
8601         DP(NETIF_MSG_HW, "Starting\n");
8602
8603         /* Set `magic' bit in order to save MF config */
8604         if (!CHIP_IS_E1(bp))
8605                 bnx2x_clp_reset_prep(bp, magic_val);
8606
8607         /* Get shmem offset */
8608         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8609         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8610
8611         /* Clear validity map flags */
8612         if (shmem > 0)
8613                 REG_WR(bp, shmem + validity_offset, 0);
8614 }
8615
8616 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8617 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
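/* The MCP is polled once per MCP_ONE_TIMEOUT (10 times longer on
 * emulation/FPGA), i.e. at most MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 times.
 */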
8618
8619 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8620  * depending on the HW type.
8621  *
8622  * @param bp
8623  */
8624 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8625 {
8626         /* special handling for emulation and FPGA,
8627            wait 10 times longer */
8628         if (CHIP_REV_IS_SLOW(bp))
8629                 msleep(MCP_ONE_TIMEOUT*10);
8630         else
8631                 msleep(MCP_ONE_TIMEOUT);
8632 }
8633
8634 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8635 {
8636         u32 shmem, cnt, validity_offset, val;
8637         int rc = 0;
8638
8639         msleep(100);
8640
8641         /* Get shmem offset */
8642         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8643         if (shmem == 0) {
8644                 BNX2X_ERR("Shmem read returned 0 - failure\n");
8645                 rc = -ENOTTY;
8646                 goto exit_lbl;
8647         }
8648
8649         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8650
8651         /* Wait for MCP to come up */
8652         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8653                 /* TBD: it's best to check the validity map of the last
8654                  * port; currently this checks port 0.
8655                  */
8656                 val = REG_RD(bp, shmem + validity_offset);
8657                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8658                    shmem + validity_offset, val);
8659
8660                 /* check that shared memory is valid. */
8661                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8662                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8663                         break;
8664
8665                 bnx2x_mcp_wait_one(bp);
8666         }
8667
8668         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8669
8670         /* Check that shared memory is valid. This indicates that MCP is up. */
8671         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8672             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8673                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8674                 rc = -ENOTTY;
8675                 goto exit_lbl;
8676         }
8677
8678 exit_lbl:
8679         /* Restore the `magic' bit value */
8680         if (!CHIP_IS_E1(bp))
8681                 bnx2x_clp_reset_done(bp, magic_val);
8682
8683         return rc;
8684 }
8685
8686 static void bnx2x_pxp_prep(struct bnx2x *bp)
8687 {
8688         if (!CHIP_IS_E1(bp)) {
8689                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8690                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8691                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8692                 mmiowb();
8693         }
8694 }
8695
8696 /*
8697  * Reset the whole chip except for:
8698  *      - PCIE core
8699  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8700  *              one reset bit)
8701  *      - IGU
8702  *      - MISC (including AEU)
8703  *      - GRC
8704  *      - RBCN, RBCP
8705  */
8706 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8707 {
8708         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8709
8710         not_reset_mask1 =
8711                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8712                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8713                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8714
8715         not_reset_mask2 =
8716                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8717                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8718                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8719                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8720                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8721                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8722                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8723                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8724
8725         reset_mask1 = 0xffffffff;
8726
8727         if (CHIP_IS_E1(bp))
8728                 reset_mask2 = 0xffff;
8729         else
8730                 reset_mask2 = 0x1ffff;
8731
8732         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8733                reset_mask1 & (~not_reset_mask1));
8734         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8735                reset_mask2 & (~not_reset_mask2));
8736
8737         barrier();
8738         mmiowb();
8739
8740         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8741         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8742         mmiowb();
8743 }
8744
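/* "Process kill" sequence: drain the PXP Tetris buffer, close gates
 * #2-#4, prepare the MCP and PXP for reset, pulse the chip reset
 * registers, wait for the MCP to come back up and reopen the gates.
 */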
8745 static int bnx2x_process_kill(struct bnx2x *bp)
8746 {
8747         int cnt = 1000;
8748         u32 val = 0;
8749         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8750
8751
8752         /* Empty the Tetris buffer, wait for 1s */
8753         do {
8754                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8755                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8756                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8757                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8758                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8759                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8760                     ((port_is_idle_0 & 0x1) == 0x1) &&
8761                     ((port_is_idle_1 & 0x1) == 0x1) &&
8762                     (pgl_exp_rom2 == 0xffffffff))
8763                         break;
8764                 msleep(1);
8765         } while (cnt-- > 0);
8766
8767         if (cnt <= 0) {
8768                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8769                           " are still"
8770                           " outstanding read requests after 1s!\n");
8771                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8772                           " port_is_idle_0=0x%08x,"
8773                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8774                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8775                           pgl_exp_rom2);
8776                 return -EAGAIN;
8777         }
8778
8779         barrier();
8780
8781         /* Close gates #2, #3 and #4 */
8782         bnx2x_set_234_gates(bp, true);
8783
8784         /* TBD: Indicate that "process kill" is in progress to MCP */
8785
8786         /* Clear "unprepared" bit */
8787         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8788         barrier();
8789
8790         /* Make sure all is written to the chip before the reset */
8791         mmiowb();
8792
8793         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8794          * PSWHST, GRC and PSWRD Tetris buffer.
8795          */
8796         msleep(1);
8797
8798         /* Prepare for chip reset: */
8799         /* MCP */
8800         bnx2x_reset_mcp_prep(bp, &val);
8801
8802         /* PXP */
8803         bnx2x_pxp_prep(bp);
8804         barrier();
8805
8806         /* reset the chip */
8807         bnx2x_process_kill_chip_reset(bp);
8808         barrier();
8809
8810         /* Recover after reset: */
8811         /* MCP */
8812         if (bnx2x_reset_mcp_comp(bp, val))
8813                 return -EAGAIN;
8814
8815         /* PXP */
8816         bnx2x_pxp_prep(bp);
8817
8818         /* Open the gates #2, #3 and #4 */
8819         bnx2x_set_234_gates(bp, false);
8820
8821         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8822          * reset state, re-enable attentions. */
8823
8824         return 0;
8825 }
8826
8827 static int bnx2x_leader_reset(struct bnx2x *bp)
8828 {
8829         int rc = 0;
8830         /* Try to recover after the failure */
8831         if (bnx2x_process_kill(bp)) {
8832                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
8833                        bp->dev->name);
8834                 rc = -EAGAIN;
8835                 goto exit_leader_reset;
8836         }
8837
8838         /* Clear "reset is in progress" bit and update the driver state */
8839         bnx2x_set_reset_done(bp);
8840         bp->recovery_state = BNX2X_RECOVERY_DONE;
8841
8842 exit_leader_reset:
8843         bp->is_leader = 0;
8844         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8845         smp_wmb();
8846         return rc;
8847 }
8848
8849 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8850
8851 /* Assumption: runs under rtnl lock. This together with the fact
8852  * that it's called only from bnx2x_reset_task() ensures that it
8853  * will never be called when netif_running(bp->dev) is false.
8854  */
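/* Recovery is a small state machine: whichever function grabs the
 * LEADER_LOCK (HW_LOCK_RESOURCE_RESERVED_08) becomes the leader, waits
 * for all other functions to unload, performs the "process kill" and
 * reloads; non-leaders wait for the reset to complete and then reload.
 */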
8855 static void bnx2x_parity_recover(struct bnx2x *bp)
8856 {
8857         DP(NETIF_MSG_HW, "Handling parity\n");
8858         while (1) {
8859                 switch (bp->recovery_state) {
8860                 case BNX2X_RECOVERY_INIT:
8861                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8862                         /* Try to get a LEADER_LOCK HW lock */
8863                         if (bnx2x_trylock_hw_lock(bp,
8864                                 HW_LOCK_RESOURCE_RESERVED_08))
8865                                 bp->is_leader = 1;
8866
8867                         /* Stop the driver */
8868                         /* If interface has been removed - break */
8869                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8870                                 return;
8871
8872                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8873                         /* Ensure "is_leader" and "recovery_state"
8874                          *  update values are seen on other CPUs
8875                          */
8876                         smp_wmb();
8877                         break;
8878
8879                 case BNX2X_RECOVERY_WAIT:
8880                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8881                         if (bp->is_leader) {
8882                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8883                                 if (load_counter) {
8884                                         /* Wait until all other functions
8885                                          * are down.
8886                                          */
8887                                         schedule_delayed_work(&bp->reset_task,
8888                                                                 HZ/10);
8889                                         return;
8890                                 } else {
8891                                         /* If all other functions are down,
8892                                          * try to bring the chip back to
8893                                          * normal. In any case it's an exit
8894                                          * point for a leader.
8895                                          */
8896                                         if (bnx2x_leader_reset(bp) ||
8897                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8898                                                 printk(KERN_ERR "%s: Recovery "
8899                                                 "has failed. Power cycle is "
8900                                                 "needed.\n", bp->dev->name);
8901                                                 /* Disconnect this device */
8902                                                 netif_device_detach(bp->dev);
8903                                                 /* Block ifup for all function
8904                                                  * of this ASIC until
8905                                                  * "process kill" or power
8906                                                  * cycle.
8907                                                  */
8908                                                 bnx2x_set_reset_in_progress(bp);
8909                                                 /* Shut down the power */
8910                                                 bnx2x_set_power_state(bp,
8911                                                                 PCI_D3hot);
8912                                                 return;
8913                                         }
8914
8915                                         return;
8916                                 }
8917                         } else { /* non-leader */
8918                                 if (!bnx2x_reset_is_done(bp)) {
8919                                         /* Try to get a LEADER_LOCK HW lock,
8920                                          * since a former leader may have
8921                                          * been unloaded by the user or
8922                                          * released leadership for another
8923                                          * reason.
8924                                          */
8925                                         if (bnx2x_trylock_hw_lock(bp,
8926                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8927                                                 /* I'm a leader now! Restart a
8928                                                  * switch case.
8929                                                  */
8930                                                 bp->is_leader = 1;
8931                                                 break;
8932                                         }
8933
8934                                         schedule_delayed_work(&bp->reset_task,
8935                                                                 HZ/10);
8936                                         return;
8937
8938                                 } else { /* A leader has completed
8939                                           * the "process kill". It's an exit
8940                                           * point for a non-leader.
8941                                           */
8942                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8943                                         bp->recovery_state =
8944                                                 BNX2X_RECOVERY_DONE;
8945                                         smp_wmb();
8946                                         return;
8947                                 }
8948                         }
8949                 default:
8950                         return;
8951                 }
8952         }
8953 }
8954
8955 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8956  * scheduled on a general queue in order to prevent a deadlock.
8957  */
8958 static void bnx2x_reset_task(struct work_struct *work)
8959 {
8960         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8961
8962 #ifdef BNX2X_STOP_ON_ERROR
8963         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8964                   " so reset not done to allow debug dump,\n"
8965          KERN_ERR " you will need to reboot when done\n");
8966         return;
8967 #endif
8968
8969         rtnl_lock();
8970
8971         if (!netif_running(bp->dev))
8972                 goto reset_task_exit;
8973
8974         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8975                 bnx2x_parity_recover(bp);
8976         else {
8977                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8978                 bnx2x_nic_load(bp, LOAD_NORMAL);
8979         }
8980
8981 reset_task_exit:
8982         rtnl_unlock();
8983 }
8984
8985 /* end of nic load/unload */
8986
8987 /* ethtool_ops */
8988
8989 /*
8990  * Init service functions
8991  */
8992
8993 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8994 {
8995         switch (func) {
8996         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8997         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8998         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8999         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9000         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9001         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9002         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9003         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9004         default:
9005                 BNX2X_ERR("Unsupported function index: %d\n", func);
9006                 return (u32)(-1);
9007         }
9008 }
9009
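/* On E1H the interrupt disable below is done "as" function 0: the PGL
 * pretend register temporarily aliases this function's GRC accesses to
 * function 0 (each write is flushed by reading the register back), and
 * the original function is restored afterwards.
 */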
9010 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9011 {
9012         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9013
9014         /* Flush all outstanding writes */
9015         mmiowb();
9016
9017         /* Pretend to be function 0 */
9018         REG_WR(bp, reg, 0);
9019         /* Flush the GRC transaction (in the chip) */
9020         new_val = REG_RD(bp, reg);
9021         if (new_val != 0) {
9022                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9023                           new_val);
9024                 BUG();
9025         }
9026
9027         /* From now on we are in "like-E1" mode */
9028         bnx2x_int_disable(bp);
9029
9030         /* Flush all outstanding writes */
9031         mmiowb();
9032
9033         /* Restore the original function settings */
9034         REG_WR(bp, reg, orig_func);
9035         new_val = REG_RD(bp, reg);
9036         if (new_val != orig_func) {
9037                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9038                           orig_func, new_val);
9039                 BUG();
9040         }
9041 }
9042
9043 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9044 {
9045         if (CHIP_IS_E1H(bp))
9046                 bnx2x_undi_int_disable_e1h(bp, func);
9047         else
9048                 bnx2x_int_disable(bp);
9049 }
9050
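/* A pre-boot (UNDI) driver may have left the chip initialized; this is
 * detected via MISC_REG_UNPREPARED plus the DORQ CID offset (UNDI sets
 * it to 0x7), in which case a clean FW unload and chip reset is done
 * before this driver takes over.
 */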
9051 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9052 {
9053         u32 val;
9054
9055         /* Check if there is any driver already loaded */
9056         val = REG_RD(bp, MISC_REG_UNPREPARED);
9057         if (val == 0x1) {
9058                 /* Check if it is the UNDI driver:
9059                  * UNDI initializes the CID offset for the normal doorbell to 0x7
9060                  */
9061                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9062                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9063                 if (val == 0x7) {
9064                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9065                         /* save our func */
9066                         int func = BP_FUNC(bp);
9067                         u32 swap_en;
9068                         u32 swap_val;
9069
9070                         /* clear the UNDI indication */
9071                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9072
9073                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9074
9075                         /* try to unload UNDI on port 0 */
9076                         bp->func = 0;
9077                         bp->fw_seq =
9078                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9079                                 DRV_MSG_SEQ_NUMBER_MASK);
9080                         reset_code = bnx2x_fw_command(bp, reset_code);
9081
9082                         /* if UNDI is loaded on the other port */
9083                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9084
9085                                 /* send "DONE" for previous unload */
9086                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9087
9088                                 /* unload UNDI on port 1 */
9089                                 bp->func = 1;
9090                                 bp->fw_seq =
9091                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9092                                         DRV_MSG_SEQ_NUMBER_MASK);
9093                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9094
9095                                 bnx2x_fw_command(bp, reset_code);
9096                         }
9097
9098                         /* now it's safe to release the lock */
9099                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9100
9101                         bnx2x_undi_int_disable(bp, func);
9102
9103                         /* close input traffic and wait for it */
9104                         /* Do not rcv packets to BRB */
9105                         REG_WR(bp,
9106                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9107                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9108                         /* Do not direct rcv packets that are not for MCP to
9109                          * the BRB */
9110                         REG_WR(bp,
9111                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9112                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9113                         /* clear AEU */
9114                         REG_WR(bp,
9115                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9116                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9117                         msleep(10);
9118
9119                         /* save NIG port swap info */
9120                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9121                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9122                         /* reset device */
9123                         REG_WR(bp,
9124                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9125                                0xd3ffffff);
9126                         REG_WR(bp,
9127                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9128                                0x1403);
9129                         /* take the NIG out of reset and restore swap values */
9130                         REG_WR(bp,
9131                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9132                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9133                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9134                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9135
9136                         /* send unload done to the MCP */
9137                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9138
9139                         /* restore our func and fw_seq */
9140                         bp->func = func;
9141                         bp->fw_seq =
9142                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9143                                 DRV_MSG_SEQ_NUMBER_MASK);
9144
9145                 } else
9146                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9147         }
9148 }
9149
9150 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9151 {
9152         u32 val, val2, val3, val4, id;
9153         u16 pmc;
9154
9155         /* Get the chip revision id and number. */
9156         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9157         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9158         id = ((val & 0xffff) << 16);
9159         val = REG_RD(bp, MISC_REG_CHIP_REV);
9160         id |= ((val & 0xf) << 12);
9161         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9162         id |= ((val & 0xff) << 4);
9163         val = REG_RD(bp, MISC_REG_BOND_ID);
9164         id |= (val & 0xf);
9165         bp->common.chip_id = id;
9166         bp->link_params.chip_id = bp->common.chip_id;
9167         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9168
9169         val = (REG_RD(bp, 0x2874) & 0x55);
9170         if ((bp->common.chip_id & 0x1) ||
9171             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9172                 bp->flags |= ONE_PORT_FLAG;
9173                 BNX2X_DEV_INFO("single port device\n");
9174         }
9175
9176         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9177         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9178                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9179         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9180                        bp->common.flash_size, bp->common.flash_size);
9181
9182         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9183         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9184         bp->link_params.shmem_base = bp->common.shmem_base;
9185         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9186                        bp->common.shmem_base, bp->common.shmem2_base);
9187
9188         if (!bp->common.shmem_base ||
9189             (bp->common.shmem_base < 0xA0000) ||
9190             (bp->common.shmem_base >= 0xC0000)) {
9191                 BNX2X_DEV_INFO("MCP not active\n");
9192                 bp->flags |= NO_MCP_FLAG;
9193                 return;
9194         }
9195
9196         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9197         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9198                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9199                 BNX2X_ERROR("BAD MCP validity signature\n");
9200
9201         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9202         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9203
9204         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9205                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9206                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9207
9208         bp->link_params.feature_config_flags = 0;
9209         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9210         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9211                 bp->link_params.feature_config_flags |=
9212                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9213         else
9214                 bp->link_params.feature_config_flags &=
9215                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9216
9217         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9218         bp->common.bc_ver = val;
9219         BNX2X_DEV_INFO("bc_ver %X\n", val);
9220         if (val < BNX2X_BC_VER) {
9221                 /* for now only warn;
9222                  * later we might need to enforce this */
9223                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9224                             "please upgrade BC\n", BNX2X_BC_VER, val);
9225         }
9226         bp->link_params.feature_config_flags |=
9227                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9228                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9229
9230         if (BP_E1HVN(bp) == 0) {
9231                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9232                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9233         } else {
9234                 /* no WOL capability for E1HVN != 0 */
9235                 bp->flags |= NO_WOL_FLAG;
9236         }
9237         BNX2X_DEV_INFO("%sWoL capable\n",
9238                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9239
9240         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9241         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9242         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9243         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9244
9245         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9246                  val, val2, val3, val4);
9247 }
9248
9249 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9250                                                     u32 switch_cfg)
9251 {
9252         int port = BP_PORT(bp);
9253         u32 ext_phy_type;
9254
9255         switch (switch_cfg) {
9256         case SWITCH_CFG_1G:
9257                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9258
9259                 ext_phy_type =
9260                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9261                 switch (ext_phy_type) {
9262                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9263                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9264                                        ext_phy_type);
9265
9266                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9267                                                SUPPORTED_10baseT_Full |
9268                                                SUPPORTED_100baseT_Half |
9269                                                SUPPORTED_100baseT_Full |
9270                                                SUPPORTED_1000baseT_Full |
9271                                                SUPPORTED_2500baseX_Full |
9272                                                SUPPORTED_TP |
9273                                                SUPPORTED_FIBRE |
9274                                                SUPPORTED_Autoneg |
9275                                                SUPPORTED_Pause |
9276                                                SUPPORTED_Asym_Pause);
9277                         break;
9278
9279                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9280                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9281                                        ext_phy_type);
9282
9283                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9284                                                SUPPORTED_10baseT_Full |
9285                                                SUPPORTED_100baseT_Half |
9286                                                SUPPORTED_100baseT_Full |
9287                                                SUPPORTED_1000baseT_Full |
9288                                                SUPPORTED_TP |
9289                                                SUPPORTED_FIBRE |
9290                                                SUPPORTED_Autoneg |
9291                                                SUPPORTED_Pause |
9292                                                SUPPORTED_Asym_Pause);
9293                         break;
9294
9295                 default:
9296                         BNX2X_ERR("NVRAM config error. "
9297                                   "BAD SerDes ext_phy_config 0x%x\n",
9298                                   bp->link_params.ext_phy_config);
9299                         return;
9300                 }
9301
9302                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9303                                            port*0x10);
9304                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9305                 break;
9306
9307         case SWITCH_CFG_10G:
9308                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9309
9310                 ext_phy_type =
9311                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9312                 switch (ext_phy_type) {
9313                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9314                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9315                                        ext_phy_type);
9316
9317                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9318                                                SUPPORTED_10baseT_Full |
9319                                                SUPPORTED_100baseT_Half |
9320                                                SUPPORTED_100baseT_Full |
9321                                                SUPPORTED_1000baseT_Full |
9322                                                SUPPORTED_2500baseX_Full |
9323                                                SUPPORTED_10000baseT_Full |
9324                                                SUPPORTED_TP |
9325                                                SUPPORTED_FIBRE |
9326                                                SUPPORTED_Autoneg |
9327                                                SUPPORTED_Pause |
9328                                                SUPPORTED_Asym_Pause);
9329                         break;
9330
9331                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9332                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9333                                        ext_phy_type);
9334
9335                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9336                                                SUPPORTED_1000baseT_Full |
9337                                                SUPPORTED_FIBRE |
9338                                                SUPPORTED_Autoneg |
9339                                                SUPPORTED_Pause |
9340                                                SUPPORTED_Asym_Pause);
9341                         break;
9342
9343                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9344                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9345                                        ext_phy_type);
9346
9347                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9348                                                SUPPORTED_2500baseX_Full |
9349                                                SUPPORTED_1000baseT_Full |
9350                                                SUPPORTED_FIBRE |
9351                                                SUPPORTED_Autoneg |
9352                                                SUPPORTED_Pause |
9353                                                SUPPORTED_Asym_Pause);
9354                         break;
9355
9356                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9357                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9358                                        ext_phy_type);
9359
9360                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9361                                                SUPPORTED_FIBRE |
9362                                                SUPPORTED_Pause |
9363                                                SUPPORTED_Asym_Pause);
9364                         break;
9365
9366                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9367                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9368                                        ext_phy_type);
9369
9370                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9371                                                SUPPORTED_1000baseT_Full |
9372                                                SUPPORTED_FIBRE |
9373                                                SUPPORTED_Pause |
9374                                                SUPPORTED_Asym_Pause);
9375                         break;
9376
9377                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9378                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9379                                        ext_phy_type);
9380
9381                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9382                                                SUPPORTED_1000baseT_Full |
9383                                                SUPPORTED_Autoneg |
9384                                                SUPPORTED_FIBRE |
9385                                                SUPPORTED_Pause |
9386                                                SUPPORTED_Asym_Pause);
9387                         break;
9388
9389                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9390                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9391                                        ext_phy_type);
9392
9393                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9394                                                SUPPORTED_1000baseT_Full |
9395                                                SUPPORTED_Autoneg |
9396                                                SUPPORTED_FIBRE |
9397                                                SUPPORTED_Pause |
9398                                                SUPPORTED_Asym_Pause);
9399                         break;
9400
9401                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9402                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9403                                        ext_phy_type);
9404
9405                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9406                                                SUPPORTED_TP |
9407                                                SUPPORTED_Autoneg |
9408                                                SUPPORTED_Pause |
9409                                                SUPPORTED_Asym_Pause);
9410                         break;
9411
9412                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9413                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9414                                        ext_phy_type);
9415
9416                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9417                                                SUPPORTED_10baseT_Full |
9418                                                SUPPORTED_100baseT_Half |
9419                                                SUPPORTED_100baseT_Full |
9420                                                SUPPORTED_1000baseT_Full |
9421                                                SUPPORTED_10000baseT_Full |
9422                                                SUPPORTED_TP |
9423                                                SUPPORTED_Autoneg |
9424                                                SUPPORTED_Pause |
9425                                                SUPPORTED_Asym_Pause);
9426                         break;
9427
9428                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9429                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9430                                   bp->link_params.ext_phy_config);
9431                         break;
9432
9433                 default:
9434                         BNX2X_ERR("NVRAM config error. "
9435                                   "BAD XGXS ext_phy_config 0x%x\n",
9436                                   bp->link_params.ext_phy_config);
9437                         return;
9438                 }
9439
9440                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9441                                            port*0x18);
9442                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9443
9444                 break;
9445
9446         default:
9447                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9448                           bp->port.link_config);
9449                 return;
9450         }
9451         bp->link_params.phy_addr = bp->port.phy_addr;
9452
9453         /* mask what we support according to speed_cap_mask */
9454         if (!(bp->link_params.speed_cap_mask &
9455                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9456                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9457
9458         if (!(bp->link_params.speed_cap_mask &
9459                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9460                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9461
9462         if (!(bp->link_params.speed_cap_mask &
9463                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9464                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9465
9466         if (!(bp->link_params.speed_cap_mask &
9467                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9468                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9469
9470         if (!(bp->link_params.speed_cap_mask &
9471                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9472                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9473                                         SUPPORTED_1000baseT_Full);
9474
9475         if (!(bp->link_params.speed_cap_mask &
9476                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9477                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9478
9479         if (!(bp->link_params.speed_cap_mask &
9480                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9481                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9482
9483         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9484 }
9485
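     /*
      * Translate the NVRAM link_config word into the requested line
      * speed, duplex and advertising mask, validating each choice
      * against the bp->port.supported mask computed by
      * bnx2x_link_settings_supported().  An unsupported combination is
      * reported as an NVRAM config error; an unknown speed selection
      * falls back to autoneg with full advertising.
      */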
9486 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9487 {
9488         bp->link_params.req_duplex = DUPLEX_FULL;
9489
9490         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9491         case PORT_FEATURE_LINK_SPEED_AUTO:
9492                 if (bp->port.supported & SUPPORTED_Autoneg) {
9493                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9494                         bp->port.advertising = bp->port.supported;
9495                 } else {
9496                         u32 ext_phy_type =
9497                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9498
9499                         if ((ext_phy_type ==
9500                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9501                             (ext_phy_type ==
9502                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9503                                 /* force 10G, no AN */
9504                                 bp->link_params.req_line_speed = SPEED_10000;
9505                                 bp->port.advertising =
9506                                                 (ADVERTISED_10000baseT_Full |
9507                                                  ADVERTISED_FIBRE);
9508                                 break;
9509                         }
9510                         BNX2X_ERROR("NVRAM config error. "
9511                                     "Invalid link_config 0x%x"
9512                                     "  Autoneg not supported\n",
9513                                     bp->port.link_config);
9514                         return;
9515                 }
9516                 break;
9517
9518         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9519                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9520                         bp->link_params.req_line_speed = SPEED_10;
9521                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9522                                                 ADVERTISED_TP);
9523                 } else {
9524                         BNX2X_ERROR("NVRAM config error. "
9525                                     "Invalid link_config 0x%x"
9526                                     "  speed_cap_mask 0x%x\n",
9527                                     bp->port.link_config,
9528                                     bp->link_params.speed_cap_mask);
9529                         return;
9530                 }
9531                 break;
9532
9533         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9534                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9535                         bp->link_params.req_line_speed = SPEED_10;
9536                         bp->link_params.req_duplex = DUPLEX_HALF;
9537                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9538                                                 ADVERTISED_TP);
9539                 } else {
9540                         BNX2X_ERROR("NVRAM config error. "
9541                                     "Invalid link_config 0x%x"
9542                                     "  speed_cap_mask 0x%x\n",
9543                                     bp->port.link_config,
9544                                     bp->link_params.speed_cap_mask);
9545                         return;
9546                 }
9547                 break;
9548
9549         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9550                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9551                         bp->link_params.req_line_speed = SPEED_100;
9552                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9553                                                 ADVERTISED_TP);
9554                 } else {
9555                         BNX2X_ERROR("NVRAM config error. "
9556                                     "Invalid link_config 0x%x"
9557                                     "  speed_cap_mask 0x%x\n",
9558                                     bp->port.link_config,
9559                                     bp->link_params.speed_cap_mask);
9560                         return;
9561                 }
9562                 break;
9563
9564         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9565                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9566                         bp->link_params.req_line_speed = SPEED_100;
9567                         bp->link_params.req_duplex = DUPLEX_HALF;
9568                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9569                                                 ADVERTISED_TP);
9570                 } else {
9571                         BNX2X_ERROR("NVRAM config error. "
9572                                     "Invalid link_config 0x%x"
9573                                     "  speed_cap_mask 0x%x\n",
9574                                     bp->port.link_config,
9575                                     bp->link_params.speed_cap_mask);
9576                         return;
9577                 }
9578                 break;
9579
9580         case PORT_FEATURE_LINK_SPEED_1G:
9581                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9582                         bp->link_params.req_line_speed = SPEED_1000;
9583                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9584                                                 ADVERTISED_TP);
9585                 } else {
9586                         BNX2X_ERROR("NVRAM config error. "
9587                                     "Invalid link_config 0x%x"
9588                                     "  speed_cap_mask 0x%x\n",
9589                                     bp->port.link_config,
9590                                     bp->link_params.speed_cap_mask);
9591                         return;
9592                 }
9593                 break;
9594
9595         case PORT_FEATURE_LINK_SPEED_2_5G:
9596                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9597                         bp->link_params.req_line_speed = SPEED_2500;
9598                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9599                                                 ADVERTISED_TP);
9600                 } else {
9601                         BNX2X_ERROR("NVRAM config error. "
9602                                     "Invalid link_config 0x%x"
9603                                     "  speed_cap_mask 0x%x\n",
9604                                     bp->port.link_config,
9605                                     bp->link_params.speed_cap_mask);
9606                         return;
9607                 }
9608                 break;
9609
9610         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9611         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9612         case PORT_FEATURE_LINK_SPEED_10G_KR:
9613                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9614                         bp->link_params.req_line_speed = SPEED_10000;
9615                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9616                                                 ADVERTISED_FIBRE);
9617                 } else {
9618                         BNX2X_ERROR("NVRAM config error. "
9619                                     "Invalid link_config 0x%x"
9620                                     "  speed_cap_mask 0x%x\n",
9621                                     bp->port.link_config,
9622                                     bp->link_params.speed_cap_mask);
9623                         return;
9624                 }
9625                 break;
9626
9627         default:
9628                 BNX2X_ERROR("NVRAM config error. "
9629                             "BAD link speed link_config 0x%x\n",
9630                             bp->port.link_config);
9631                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9632                 bp->port.advertising = bp->port.supported;
9633                 break;
9634         }
9635
9636         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9637                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9638         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9639             !(bp->port.supported & SUPPORTED_Autoneg))
9640                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9641
9642         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9643                        "  advertising 0x%x\n",
9644                        bp->link_params.req_line_speed,
9645                        bp->link_params.req_duplex,
9646                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9647 }
9648
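     /*
      * Pack a MAC address, stored in shmem as a 16-bit upper and a
      * 32-bit lower word, into the canonical 6-byte wire format.  Both
      * halves are converted to big-endian first, so mac_hi = 0x0010 and
      * mac_lo = 0x18000001 yield the buffer 00:10:18:00:00:01.
      */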
9649 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9650 {
9651         mac_hi = cpu_to_be16(mac_hi);
9652         mac_lo = cpu_to_be32(mac_lo);
9653         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9654         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9655 }
9656
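     /*
      * Read the per-port hardware configuration from shmem: lane and
      * external PHY config (normalizing the BCM8727_NOC variant to a
      * plain BCM8727 plus a feature flag), the speed capability mask,
      * the link_config word, the per-lane XGXS rx/tx equalization
      * values and the default WoL state.  It then resolves the
      * supported/requested link settings, selects the MDIO address and
      * extracts the port MAC address(es).
      */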
9657 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9658 {
9659         int port = BP_PORT(bp);
9660         u32 val, val2;
9661         u32 config;
9662         u16 i;
9663         u32 ext_phy_type;
9664
9665         bp->link_params.bp = bp;
9666         bp->link_params.port = port;
9667
9668         bp->link_params.lane_config =
9669                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9670         bp->link_params.ext_phy_config =
9671                 SHMEM_RD(bp,
9672                          dev_info.port_hw_config[port].external_phy_config);
9673         /* BCM8727_NOC => BCM8727 without over-current protection */
9674         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9675             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9676                 bp->link_params.ext_phy_config &=
9677                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9678                 bp->link_params.ext_phy_config |=
9679                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9680                 bp->link_params.feature_config_flags |=
9681                         FEATURE_CONFIG_BCM8727_NOC;
9682         }
9683
9684         bp->link_params.speed_cap_mask =
9685                 SHMEM_RD(bp,
9686                          dev_info.port_hw_config[port].speed_capability_mask);
9687
9688         bp->port.link_config =
9689                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9690
9691         /* Get the 4 lanes xgxs config rx and tx */
9692         for (i = 0; i < 2; i++) {
9693                 val = SHMEM_RD(bp,
9694                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9695                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9696                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9697
9698                 val = SHMEM_RD(bp,
9699                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9700                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9701                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9702         }
9703
9704         /* If the device is capable of WoL, set the default state according
9705          * to the HW
9706          */
9707         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9708         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9709                    (config & PORT_FEATURE_WOL_ENABLED));
9710
9711         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9712                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9713                        bp->link_params.lane_config,
9714                        bp->link_params.ext_phy_config,
9715                        bp->link_params.speed_cap_mask, bp->port.link_config);
9716
9717         bp->link_params.switch_cfg |= (bp->port.link_config &
9718                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9719         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9720
9721         bnx2x_link_settings_requested(bp);
9722
9723         /*
9724          * If connected directly, work with the internal PHY, otherwise, work
9725          * with the external PHY
9726          */
9727         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9728         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9729                 bp->mdio.prtad = bp->link_params.phy_addr;
9730
9731         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9732                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9733                 bp->mdio.prtad =
9734                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9735
9736         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9737         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9738         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9739         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9740         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9741
9742 #ifdef BCM_CNIC
9743         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9744         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9745         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9746 #endif
9747 }
9748
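     /*
      * Gather function-level hardware info.  On E1H the mf_cfg section
      * of shmem is used to detect multi-function mode: a valid outer
      * VLAN (e1hov) tag on FUNC_0 means the board runs in MF mode, in
      * which case this function's own tag is recorded and its MAC may
      * be overridden below.  Inconsistent MF configuration is rejected
      * with -EPERM, and without an MCP a random MAC is generated.
      */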
9749 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9750 {
9751         int func = BP_FUNC(bp);
9752         u32 val, val2;
9753         int rc = 0;
9754
9755         bnx2x_get_common_hwinfo(bp);
9756
9757         bp->e1hov = 0;
9758         bp->e1hmf = 0;
9759         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9760                 bp->mf_config =
9761                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9762
9763                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9764                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9765                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9766                         bp->e1hmf = 1;
9767                 BNX2X_DEV_INFO("%s function mode\n",
9768                                IS_E1HMF(bp) ? "multi" : "single");
9769
9770                 if (IS_E1HMF(bp)) {
9771                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9772                                                                 e1hov_tag) &
9773                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9774                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9775                                 bp->e1hov = val;
9776                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9777                                                "(0x%04x)\n",
9778                                                func, bp->e1hov, bp->e1hov);
9779                         } else {
9780                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9781                                             "  aborting\n", func);
9782                                 rc = -EPERM;
9783                         }
9784                 } else {
9785                         if (BP_E1HVN(bp)) {
9786                                 BNX2X_ERROR("VN %d in single function mode,"
9787                                             "  aborting\n", BP_E1HVN(bp));
9788                                 rc = -EPERM;
9789                         }
9790                 }
9791         }
9792
9793         if (!BP_NOMCP(bp)) {
9794                 bnx2x_get_port_hwinfo(bp);
9795
9796                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9797                               DRV_MSG_SEQ_NUMBER_MASK);
9798                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9799         }
9800
9801         if (IS_E1HMF(bp)) {
9802                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9803                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9804                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9805                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
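                             /* mac_upper holds MAC bytes 0-1 in its low 16
                              * bits and mac_lower holds bytes 2-5, most
                              * significant byte first
                              */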
9806                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9807                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9808                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9809                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9810                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9811                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9812                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9813                                ETH_ALEN);
9814                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9815                                ETH_ALEN);
9816                 }
9817
9818                 return rc;
9819         }
9820
9821         if (BP_NOMCP(bp)) {
9822                 /* only supposed to happen on emulation/FPGA */
9823                 BNX2X_ERROR("warning: random MAC workaround active\n");
9824                 random_ether_addr(bp->dev->dev_addr);
9825                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9826         }
9827
9828         return rc;
9829 }
9830
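     /*
      * Parse the PCI Vital Product Data: locate the read-only LRDT
      * section, verify that the MFR_ID keyword matches the Dell vendor
      * ID (in either hex-digit case) and, if so, copy the VENDOR0
      * keyword value into bp->fw_ver.  On any parse failure fw_ver is
      * simply left zeroed.
      */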
9831 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9832 {
9833         int cnt, i, block_end, rodi;
9834         char vpd_data[BNX2X_VPD_LEN+1];
9835         char str_id_reg[VENDOR_ID_LEN+1];
9836         char str_id_cap[VENDOR_ID_LEN+1];
9837         u8 len;
9838
9839         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9840         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9841
9842         if (cnt < BNX2X_VPD_LEN)
9843                 goto out_not_found;
9844
9845         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9846                              PCI_VPD_LRDT_RO_DATA);
9847         if (i < 0)
9848                 goto out_not_found;
9849
9850
9851         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9852                     pci_vpd_lrdt_size(&vpd_data[i]);
9853
9854         i += PCI_VPD_LRDT_TAG_SIZE;
9855
9856         if (block_end > BNX2X_VPD_LEN)
9857                 goto out_not_found;
9858
9859         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9860                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9861         if (rodi < 0)
9862                 goto out_not_found;
9863
9864         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9865
9866         if (len != VENDOR_ID_LEN)
9867                 goto out_not_found;
9868
9869         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9870
9871         /* vendor specific info */
9872         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9873         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9874         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9875             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9876
9877                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9878                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9879                 if (rodi >= 0) {
9880                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9881
9882                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9883
9884                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9885                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9886                                 bp->fw_ver[len] = ' ';
9887                         }
9888                 }
9889                 return;
9890         }
9891 out_not_found:
9892         return;
9893 }
9894
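     /*
      * One-time software initialization of the bp structure: interrupts
      * are blocked until the HW is initialized, locks and deferred work
      * are set up, HW/FW info is read, a possibly active UNDI driver is
      * unloaded, and the multi-queue, TPA, coalescing and timer
      * defaults are resolved from the module parameters.
      */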
9895 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9896 {
9897         int func = BP_FUNC(bp);
9898         int timer_interval;
9899         int rc;
9900
9901         /* Disable interrupt handling until HW is initialized */
9902         atomic_set(&bp->intr_sem, 1);
9903         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9904
9905         mutex_init(&bp->port.phy_mutex);
9906         mutex_init(&bp->fw_mb_mutex);
9907 #ifdef BCM_CNIC
9908         mutex_init(&bp->cnic_mutex);
9909 #endif
9910
9911         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9912         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9913
9914         rc = bnx2x_get_hwinfo(bp);
9915
9916         bnx2x_read_fwinfo(bp);
9917         /* need to reset chip if undi was active */
9918         if (!BP_NOMCP(bp))
9919                 bnx2x_undi_unload(bp);
9920
9921         if (CHIP_REV_IS_FPGA(bp))
9922                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9923
9924         if (BP_NOMCP(bp) && (func == 0))
9925                 dev_err(&bp->pdev->dev, "MCP disabled, "
9926                                         "must load devices in order!\n");
9927
9928         /* Set multi queue mode */
9929         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9930             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9931                 dev_err(&bp->pdev->dev, "Multi-queue disabled since the "
9932                                         "requested int_mode is not MSI-X\n");
9933                 multi_mode = ETH_RSS_MODE_DISABLED;
9934         }
9935         bp->multi_mode = multi_mode;
9936
9937
9938         bp->dev->features |= NETIF_F_GRO;
9939
9940         /* Set TPA flags */
9941         if (disable_tpa) {
9942                 bp->flags &= ~TPA_ENABLE_FLAG;
9943                 bp->dev->features &= ~NETIF_F_LRO;
9944         } else {
9945                 bp->flags |= TPA_ENABLE_FLAG;
9946                 bp->dev->features |= NETIF_F_LRO;
9947         }
9948
9949         if (CHIP_IS_E1(bp))
9950                 bp->dropless_fc = 0;
9951         else
9952                 bp->dropless_fc = dropless_fc;
9953
9954         bp->mrrs = mrrs;
9955
9956         bp->tx_ring_size = MAX_TX_AVAIL;
9957         bp->rx_ring_size = MAX_RX_AVAIL;
9958
9959         bp->rx_csum = 1;
9960
9961         /* round the default coalescing timeouts down to (4 * BNX2X_BTR) granularity */
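             /* e.g. with a hypothetical granularity of (4 * BNX2X_BTR) == 8,
              * the defaults of 50 and 25 would round down to 48 and 24
              */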
9962         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9963         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9964
9965         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9966         bp->current_interval = (poll ? poll : timer_interval);
9967
9968         init_timer(&bp->timer);
9969         bp->timer.expires = jiffies + bp->current_interval;
9970         bp->timer.data = (unsigned long) bp;
9971         bp->timer.function = bnx2x_timer;
9972
9973         return rc;
9974 }
9975
9976 /*
9977  * ethtool service functions
9978  */
9979
9980 /* All ethtool functions called with rtnl_lock */
9981
9982 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9983 {
9984         struct bnx2x *bp = netdev_priv(dev);
9985
9986         cmd->supported = bp->port.supported;
9987         cmd->advertising = bp->port.advertising;
9988
9989         if ((bp->state == BNX2X_STATE_OPEN) &&
9990             !(bp->flags & MF_FUNC_DIS) &&
9991             (bp->link_vars.link_up)) {
9992                 cmd->speed = bp->link_vars.line_speed;
9993                 cmd->duplex = bp->link_vars.duplex;
9994                 if (IS_E1HMF(bp)) {
9995                         u16 vn_max_rate;
9996
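                             /* the MAX_BW field of mf_config is in units
                              * of 100 Mbps; scale it to Mbps and clamp
                              * the reported speed to this function's
                              * share of the link
                              */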
9997                         vn_max_rate =
9998                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9999                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10000                         if (vn_max_rate < cmd->speed)
10001                                 cmd->speed = vn_max_rate;
10002                 }
10003         } else {
10004                 cmd->speed = -1;
10005                 cmd->duplex = -1;
10006         }
10007
10008         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10009                 u32 ext_phy_type =
10010                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10011
10012                 switch (ext_phy_type) {
10013                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10014                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10015                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10016                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10017                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10018                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10019                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10020                         cmd->port = PORT_FIBRE;
10021                         break;
10022
10023                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10024                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10025                         cmd->port = PORT_TP;
10026                         break;
10027
10028                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10029                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10030                                   bp->link_params.ext_phy_config);
10031                         break;
10032
10033                 default:
10034                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10035                            bp->link_params.ext_phy_config);
10036                         break;
10037                 }
10038         } else
10039                 cmd->port = PORT_TP;
10040
10041         cmd->phy_address = bp->mdio.prtad;
10042         cmd->transceiver = XCVR_INTERNAL;
10043
10044         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10045                 cmd->autoneg = AUTONEG_ENABLE;
10046         else
10047                 cmd->autoneg = AUTONEG_DISABLE;
10048
10049         cmd->maxtxpkt = 0;
10050         cmd->maxrxpkt = 0;
10051
10052         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10053            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10054            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10055            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10056            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10057            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10058            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10059
10060         return 0;
10061 }
10062
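      /*
       * In E1H multi-function mode the physical link is shared by all
       * functions on the port, so per-function requests to change it
       * are accepted but silently ignored.
       */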
10063 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10064 {
10065         struct bnx2x *bp = netdev_priv(dev);
10066         u32 advertising;
10067
10068         if (IS_E1HMF(bp))
10069                 return 0;
10070
10071         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10072            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10073            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10074            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10075            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10076            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10077            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10078
10079         if (cmd->autoneg == AUTONEG_ENABLE) {
10080                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10081                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10082                         return -EINVAL;
10083                 }
10084
10085                 /* advertise the requested speed and duplex if supported */
10086                 cmd->advertising &= bp->port.supported;
10087
10088                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10089                 bp->link_params.req_duplex = DUPLEX_FULL;
10090                 bp->port.advertising |= (ADVERTISED_Autoneg |
10091                                          cmd->advertising);
10092
10093         } else { /* forced speed */
10094                 /* advertise the requested speed and duplex if supported */
10095                 switch (cmd->speed) {
10096                 case SPEED_10:
10097                         if (cmd->duplex == DUPLEX_FULL) {
10098                                 if (!(bp->port.supported &
10099                                       SUPPORTED_10baseT_Full)) {
10100                                         DP(NETIF_MSG_LINK,
10101                                            "10M full not supported\n");
10102                                         return -EINVAL;
10103                                 }
10104
10105                                 advertising = (ADVERTISED_10baseT_Full |
10106                                                ADVERTISED_TP);
10107                         } else {
10108                                 if (!(bp->port.supported &
10109                                       SUPPORTED_10baseT_Half)) {
10110                                         DP(NETIF_MSG_LINK,
10111                                            "10M half not supported\n");
10112                                         return -EINVAL;
10113                                 }
10114
10115                                 advertising = (ADVERTISED_10baseT_Half |
10116                                                ADVERTISED_TP);
10117                         }
10118                         break;
10119
10120                 case SPEED_100:
10121                         if (cmd->duplex == DUPLEX_FULL) {
10122                                 if (!(bp->port.supported &
10123                                                 SUPPORTED_100baseT_Full)) {
10124                                         DP(NETIF_MSG_LINK,
10125                                            "100M full not supported\n");
10126                                         return -EINVAL;
10127                                 }
10128
10129                                 advertising = (ADVERTISED_100baseT_Full |
10130                                                ADVERTISED_TP);
10131                         } else {
10132                                 if (!(bp->port.supported &
10133                                                 SUPPORTED_100baseT_Half)) {
10134                                         DP(NETIF_MSG_LINK,
10135                                            "100M half not supported\n");
10136                                         return -EINVAL;
10137                                 }
10138
10139                                 advertising = (ADVERTISED_100baseT_Half |
10140                                                ADVERTISED_TP);
10141                         }
10142                         break;
10143
10144                 case SPEED_1000:
10145                         if (cmd->duplex != DUPLEX_FULL) {
10146                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10147                                 return -EINVAL;
10148                         }
10149
10150                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10151                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10152                                 return -EINVAL;
10153                         }
10154
10155                         advertising = (ADVERTISED_1000baseT_Full |
10156                                        ADVERTISED_TP);
10157                         break;
10158
10159                 case SPEED_2500:
10160                         if (cmd->duplex != DUPLEX_FULL) {
10161                                 DP(NETIF_MSG_LINK,
10162                                    "2.5G half not supported\n");
10163                                 return -EINVAL;
10164                         }
10165
10166                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10167                                 DP(NETIF_MSG_LINK,
10168                                    "2.5G full not supported\n");
10169                                 return -EINVAL;
10170                         }
10171
10172                         advertising = (ADVERTISED_2500baseX_Full |
10173                                        ADVERTISED_TP);
10174                         break;
10175
10176                 case SPEED_10000:
10177                         if (cmd->duplex != DUPLEX_FULL) {
10178                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10179                                 return -EINVAL;
10180                         }
10181
10182                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10183                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10184                                 return -EINVAL;
10185                         }
10186
10187                         advertising = (ADVERTISED_10000baseT_Full |
10188                                        ADVERTISED_FIBRE);
10189                         break;
10190
10191                 default:
10192                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10193                         return -EINVAL;
10194                 }
10195
10196                 bp->link_params.req_line_speed = cmd->speed;
10197                 bp->link_params.req_duplex = cmd->duplex;
10198                 bp->port.advertising = advertising;
10199         }
10200
10201         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10202            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10203            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10204            bp->port.advertising);
10205
10206         if (netif_running(dev)) {
10207                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10208                 bnx2x_link_set(bp);
10209         }
10210
10211         return 0;
10212 }
10213
10214 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10215 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10216
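      /*
       * The dump length is computed from the register tables: every
       * register block that is online for this chip contributes its
       * size in dwords, and every wide-bus ("wreg") block contributes
       * size * (1 + read_regs_count) dwords.  As an illustrative
       * example, a wreg entry with size 4 and read_regs_count 2 adds
       * 4 * (1 + 2) = 12 dwords, i.e. 48 bytes.  The dump header is
       * accounted for on top.
       */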
10217 static int bnx2x_get_regs_len(struct net_device *dev)
10218 {
10219         struct bnx2x *bp = netdev_priv(dev);
10220         int regdump_len = 0;
10221         int i;
10222
10223         if (CHIP_IS_E1(bp)) {
10224                 for (i = 0; i < REGS_COUNT; i++)
10225                         if (IS_E1_ONLINE(reg_addrs[i].info))
10226                                 regdump_len += reg_addrs[i].size;
10227
10228                 for (i = 0; i < WREGS_COUNT_E1; i++)
10229                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10230                                 regdump_len += wreg_addrs_e1[i].size *
10231                                         (1 + wreg_addrs_e1[i].read_regs_count);
10232
10233         } else { /* E1H */
10234                 for (i = 0; i < REGS_COUNT; i++)
10235                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10236                                 regdump_len += reg_addrs[i].size;
10237
10238                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10239                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10240                                 regdump_len += wreg_addrs_e1h[i].size *
10241                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10242         }
10243         regdump_len *= 4;
10244         regdump_len += sizeof(struct dump_hdr);
10245
10246         return regdump_len;
10247 }
10248
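      /*
       * Fill the dump header (storm WAITP markers and the online-info
       * flag) and then stream the raw value of every register that is
       * online for this chip, in exactly the layout whose size is
       * computed by bnx2x_get_regs_len() above.
       */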
10249 static void bnx2x_get_regs(struct net_device *dev,
10250                            struct ethtool_regs *regs, void *_p)
10251 {
10252         u32 *p = _p, i, j;
10253         struct bnx2x *bp = netdev_priv(dev);
10254         struct dump_hdr dump_hdr = {0};
10255
10256         regs->version = 0;
10257         memset(p, 0, regs->len);
10258
10259         if (!netif_running(bp->dev))
10260                 return;
10261
10262         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10263         dump_hdr.dump_sign = dump_sign_all;
10264         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10265         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10266         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10267         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10268         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10269
10270         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10271         p += dump_hdr.hdr_size + 1;
10272
10273         if (CHIP_IS_E1(bp)) {
10274                 for (i = 0; i < REGS_COUNT; i++)
10275                         if (IS_E1_ONLINE(reg_addrs[i].info))
10276                                 for (j = 0; j < reg_addrs[i].size; j++)
10277                                         *p++ = REG_RD(bp,
10278                                                       reg_addrs[i].addr + j*4);
10279
10280         } else { /* E1H */
10281                 for (i = 0; i < REGS_COUNT; i++)
10282                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10283                                 for (j = 0; j < reg_addrs[i].size; j++)
10284                                         *p++ = REG_RD(bp,
10285                                                       reg_addrs[i].addr + j*4);
10286         }
10287 }
10288
10289 #define PHY_FW_VER_LEN                  10
10290
10291 static void bnx2x_get_drvinfo(struct net_device *dev,
10292                               struct ethtool_drvinfo *info)
10293 {
10294         struct bnx2x *bp = netdev_priv(dev);
10295         u8 phy_fw_ver[PHY_FW_VER_LEN];
10296
10297         strcpy(info->driver, DRV_MODULE_NAME);
10298         strcpy(info->version, DRV_MODULE_VERSION);
10299
10300         phy_fw_ver[0] = '\0';
10301         if (bp->port.pmf) {
10302                 bnx2x_acquire_phy_lock(bp);
10303                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10304                                              (bp->state != BNX2X_STATE_CLOSED),
10305                                              phy_fw_ver, PHY_FW_VER_LEN);
10306                 bnx2x_release_phy_lock(bp);
10307         }
10308
10309         strncpy(info->fw_version, bp->fw_ver, 32);
10310         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10311                  "bc %d.%d.%d%s%s",
10312                  (bp->common.bc_ver & 0xff0000) >> 16,
10313                  (bp->common.bc_ver & 0xff00) >> 8,
10314                  (bp->common.bc_ver & 0xff),
10315                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10316         strcpy(info->bus_info, pci_name(bp->pdev));
10317         info->n_stats = BNX2X_NUM_STATS;
10318         info->testinfo_len = BNX2X_NUM_TESTS;
10319         info->eedump_len = bp->common.flash_size;
10320         info->regdump_len = bnx2x_get_regs_len(dev);
10321 }
10322
10323 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10324 {
10325         struct bnx2x *bp = netdev_priv(dev);
10326
10327         if (bp->flags & NO_WOL_FLAG) {
10328                 wol->supported = 0;
10329                 wol->wolopts = 0;
10330         } else {
10331                 wol->supported = WAKE_MAGIC;
10332                 if (bp->wol)
10333                         wol->wolopts = WAKE_MAGIC;
10334                 else
10335                         wol->wolopts = 0;
10336         }
10337         memset(&wol->sopass, 0, sizeof(wol->sopass));
10338 }
10339
10340 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10341 {
10342         struct bnx2x *bp = netdev_priv(dev);
10343
10344         if (wol->wolopts & ~WAKE_MAGIC)
10345                 return -EINVAL;
10346
10347         if (wol->wolopts & WAKE_MAGIC) {
10348                 if (bp->flags & NO_WOL_FLAG)
10349                         return -EINVAL;
10350
10351                 bp->wol = 1;
10352         } else
10353                 bp->wol = 0;
10354
10355         return 0;
10356 }
10357
10358 static u32 bnx2x_get_msglevel(struct net_device *dev)
10359 {
10360         struct bnx2x *bp = netdev_priv(dev);
10361
10362         return bp->msg_enable;
10363 }
10364
10365 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10366 {
10367         struct bnx2x *bp = netdev_priv(dev);
10368
10369         if (capable(CAP_NET_ADMIN))
10370                 bp->msg_enable = level;
10371 }
10372
10373 static int bnx2x_nway_reset(struct net_device *dev)
10374 {
10375         struct bnx2x *bp = netdev_priv(dev);
10376
10377         if (!bp->port.pmf)
10378                 return 0;
10379
10380         if (netif_running(dev)) {
10381                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10382                 bnx2x_link_set(bp);
10383         }
10384
10385         return 0;
10386 }
10387
10388 static u32 bnx2x_get_link(struct net_device *dev)
10389 {
10390         struct bnx2x *bp = netdev_priv(dev);
10391
10392         if (bp->flags & MF_FUNC_DIS)
10393                 return 0;
10394
10395         return bp->link_vars.link_up;
10396 }
10397
10398 static int bnx2x_get_eeprom_len(struct net_device *dev)
10399 {
10400         struct bnx2x *bp = netdev_priv(dev);
10401
10402         return bp->common.flash_size;
10403 }
10404
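      /*
       * NVRAM access is arbitrated per port through the MCP SW-arb
       * register: set the REQ bit, then poll for the matching ARB bit
       * (5 us per poll, with the timeout stretched 100x on
       * emulation/FPGA).  The typical access sequence, as used by
       * bnx2x_nvram_read()/bnx2x_nvram_write() below, is:
       *
       *	rc = bnx2x_acquire_nvram_lock(bp);
       *	if (rc)
       *		return rc;
       *	bnx2x_enable_nvram_access(bp);
       *	... bnx2x_nvram_read_dword()/bnx2x_nvram_write_dword() ...
       *	bnx2x_disable_nvram_access(bp);
       *	bnx2x_release_nvram_lock(bp);
       */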
10405 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10406 {
10407         int port = BP_PORT(bp);
10408         int count, i;
10409         u32 val = 0;
10410
10411         /* adjust timeout for emulation/FPGA */
10412         count = NVRAM_TIMEOUT_COUNT;
10413         if (CHIP_REV_IS_SLOW(bp))
10414                 count *= 100;
10415
10416         /* request access to nvram interface */
10417         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10418                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10419
10420         for (i = 0; i < count*10; i++) {
10421                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10422                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10423                         break;
10424
10425                 udelay(5);
10426         }
10427
10428         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10429                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10430                 return -EBUSY;
10431         }
10432
10433         return 0;
10434 }
10435
10436 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10437 {
10438         int port = BP_PORT(bp);
10439         int count, i;
10440         u32 val = 0;
10441
10442         /* adjust timeout for emulation/FPGA */
10443         count = NVRAM_TIMEOUT_COUNT;
10444         if (CHIP_REV_IS_SLOW(bp))
10445                 count *= 100;
10446
10447         /* relinquish nvram interface */
10448         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10449                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10450
10451         for (i = 0; i < count*10; i++) {
10452                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10453                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10454                         break;
10455
10456                 udelay(5);
10457         }
10458
10459         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10460                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10461                 return -EBUSY;
10462         }
10463
10464         return 0;
10465 }
10466
10467 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10468 {
10469         u32 val;
10470
10471         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10472
10473         /* enable both bits, even on read */
10474         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10475                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10476                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10477 }
10478
10479 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10480 {
10481         u32 val;
10482
10483         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10484
10485         /* disable both bits, even after read */
10486         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10487                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10488                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10489 }
10490
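      /*
       * Read one dword from NVRAM: program the address, issue a DOIT
       * command carrying the caller's FIRST/LAST framing flags, and
       * poll for the DONE bit.  The data is converted to big-endian so
       * that the caller (and ultimately ethtool) sees NVRAM as a plain
       * byte stream regardless of host endianness.
       */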
10491 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10492                                   u32 cmd_flags)
10493 {
10494         int count, i, rc;
10495         u32 val;
10496
10497         /* build the command word */
10498         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10499
10500         /* need to clear DONE bit separately */
10501         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10502
10503         /* address of the NVRAM to read from */
10504         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10505                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10506
10507         /* issue a read command */
10508         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10509
10510         /* adjust timeout for emulation/FPGA */
10511         count = NVRAM_TIMEOUT_COUNT;
10512         if (CHIP_REV_IS_SLOW(bp))
10513                 count *= 100;
10514
10515         /* wait for completion */
10516         *ret_val = 0;
10517         rc = -EBUSY;
10518         for (i = 0; i < count; i++) {
10519                 udelay(5);
10520                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10521
10522                 if (val & MCPR_NVM_COMMAND_DONE) {
10523                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10524                         /* we read nvram data in cpu order,
10525                          * but ethtool sees it as an array of bytes;
10526                          * converting to big-endian does the work */
10527                         *ret_val = cpu_to_be32(val);
10528                         rc = 0;
10529                         break;
10530                 }
10531         }
10532
10533         return rc;
10534 }
10535
10536 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10537                             int buf_size)
10538 {
10539         int rc;
10540         u32 cmd_flags;
10541         __be32 val;
10542
10543         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10544                 DP(BNX2X_MSG_NVM,
10545                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10546                    offset, buf_size);
10547                 return -EINVAL;
10548         }
10549
10550         if (offset + buf_size > bp->common.flash_size) {
10551                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10552                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10553                    offset, buf_size, bp->common.flash_size);
10554                 return -EINVAL;
10555         }
10556
10557         /* request access to nvram interface */
10558         rc = bnx2x_acquire_nvram_lock(bp);
10559         if (rc)
10560                 return rc;
10561
10562         /* enable access to nvram interface */
10563         bnx2x_enable_nvram_access(bp);
10564
10565         /* read the first word(s) */
10566         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10567         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10568                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10569                 memcpy(ret_buf, &val, 4);
10570
10571                 /* advance to the next dword */
10572                 offset += sizeof(u32);
10573                 ret_buf += sizeof(u32);
10574                 buf_size -= sizeof(u32);
10575                 cmd_flags = 0;
10576         }
10577
10578         if (rc == 0) {
10579                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10580                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10581                 memcpy(ret_buf, &val, 4);
10582         }
10583
10584         /* disable access to nvram interface */
10585         bnx2x_disable_nvram_access(bp);
10586         bnx2x_release_nvram_lock(bp);
10587
10588         return rc;
10589 }
10590
10591 static int bnx2x_get_eeprom(struct net_device *dev,
10592                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10593 {
10594         struct bnx2x *bp = netdev_priv(dev);
10595         int rc;
10596
10597         if (!netif_running(dev))
10598                 return -EAGAIN;
10599
10600         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10601            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10602            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10603            eeprom->len, eeprom->len);
10604
10605         /* parameters already validated in ethtool_get_eeprom */
10606
10607         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10608
10609         return rc;
10610 }
10611
10612 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10613                                    u32 cmd_flags)
10614 {
10615         int count, i, rc;
10616
10617         /* build the command word */
10618         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10619
10620         /* need to clear DONE bit separately */
10621         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10622
10623         /* write the data */
10624         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10625
10626         /* address of the NVRAM to write to */
10627         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10628                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10629
10630         /* issue the write command */
10631         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10632
10633         /* adjust timeout for emulation/FPGA */
10634         count = NVRAM_TIMEOUT_COUNT;
10635         if (CHIP_REV_IS_SLOW(bp))
10636                 count *= 100;
10637
10638         /* wait for completion */
10639         rc = -EBUSY;
10640         for (i = 0; i < count; i++) {
10641                 udelay(5);
10642                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10643                 if (val & MCPR_NVM_COMMAND_DONE) {
10644                         rc = 0;
10645                         break;
10646                 }
10647         }
10648
10649         return rc;
10650 }
10651
10652 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
10653
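      /*
       * A single-byte write is done as a read-modify-write of the
       * enclosing dword: BYTE_OFFSET() gives the bit shift of the
       * target byte, e.g. offset 0x102 is rewritten via the aligned
       * dword at 0x100 with BYTE_OFFSET(0x102) = 8 * 2 = 16.
       */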
10654 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10655                               int buf_size)
10656 {
10657         int rc;
10658         u32 cmd_flags;
10659         u32 align_offset;
10660         __be32 val;
10661
10662         if (offset + buf_size > bp->common.flash_size) {
10663                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10664                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10665                    offset, buf_size, bp->common.flash_size);
10666                 return -EINVAL;
10667         }
10668
10669         /* request access to nvram interface */
10670         rc = bnx2x_acquire_nvram_lock(bp);
10671         if (rc)
10672                 return rc;
10673
10674         /* enable access to nvram interface */
10675         bnx2x_enable_nvram_access(bp);
10676
10677         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10678         align_offset = (offset & ~0x03);
10679         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10680
10681         if (rc == 0) {
10682                 val &= ~(0xff << BYTE_OFFSET(offset));
10683                 val |= (*data_buf << BYTE_OFFSET(offset));
10684
10685                         /* nvram data is returned as an array of bytes;
10686                          * convert it back to cpu order */
10687                 val = be32_to_cpu(val);
10688
10689                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10690                                              cmd_flags);
10691         }
10692
10693         /* disable access to nvram interface */
10694         bnx2x_disable_nvram_access(bp);
10695         bnx2x_release_nvram_lock(bp);
10696
10697         return rc;
10698 }
10699
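      /*
       * Dword-aligned multi-dword write.  The FIRST flag is raised on
       * the first dword and again whenever a new NVRAM_PAGE_SIZE page
       * begins; the LAST flag is raised on the final dword and on the
       * dword that ends each page, so the NVRAM sequencer sees the
       * buffer as a series of page-sized bursts.
       */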
10700 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10701                              int buf_size)
10702 {
10703         int rc;
10704         u32 cmd_flags;
10705         u32 val;
10706         u32 written_so_far;
10707
10708         if (buf_size == 1)      /* ethtool */
10709                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10710
10711         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10712                 DP(BNX2X_MSG_NVM,
10713                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10714                    offset, buf_size);
10715                 return -EINVAL;
10716         }
10717
10718         if (offset + buf_size > bp->common.flash_size) {
10719                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10720                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10721                    offset, buf_size, bp->common.flash_size);
10722                 return -EINVAL;
10723         }
10724
10725         /* request access to nvram interface */
10726         rc = bnx2x_acquire_nvram_lock(bp);
10727         if (rc)
10728                 return rc;
10729
10730         /* enable access to nvram interface */
10731         bnx2x_enable_nvram_access(bp);
10732
10733         written_so_far = 0;
10734         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10735         while ((written_so_far < buf_size) && (rc == 0)) {
10736                 if (written_so_far == (buf_size - sizeof(u32)))
10737                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10738                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10739                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10740                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10741                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10742
10743                 memcpy(&val, data_buf, 4);
10744
10745                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10746
10747                 /* advance to the next dword */
10748                 offset += sizeof(u32);
10749                 data_buf += sizeof(u32);
10750                 written_so_far += sizeof(u32);
10751                 cmd_flags = 0;
10752         }
10753
10754         /* disable access to nvram interface */
10755         bnx2x_disable_nvram_access(bp);
10756         bnx2x_release_nvram_lock(bp);
10757
10758         return rc;
10759 }
10760
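      /*
       * The eeprom->magic field doubles as an opcode for PHY firmware
       * maintenance: magics in the PMF-only 0x504859xx range prepare
       * ('PHYP') or re-initialize ('PHYR') the link around an SFX7101
       * FW upgrade, 0x53985943 completes the upgrade, and any other
       * magic performs a plain NVRAM write.
       */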
10761 static int bnx2x_set_eeprom(struct net_device *dev,
10762                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10763 {
10764         struct bnx2x *bp = netdev_priv(dev);
10765         int port = BP_PORT(bp);
10766         int rc = 0;
10767
10768         if (!netif_running(dev))
10769                 return -EAGAIN;
10770
10771         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10772            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10773            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10774            eeprom->len, eeprom->len);
10775
10776         /* parameters already validated in ethtool_set_eeprom */
10777
10778         /* PHY eeprom can be accessed only by the PMF */
10779         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10780             !bp->port.pmf)
10781                 return -EINVAL;
10782
10783         if (eeprom->magic == 0x50485950) {
10784                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10785                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10786
10787                 bnx2x_acquire_phy_lock(bp);
10788                 rc |= bnx2x_link_reset(&bp->link_params,
10789                                        &bp->link_vars, 0);
10790                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10791                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10792                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10793                                        MISC_REGISTERS_GPIO_HIGH, port);
10794                 bnx2x_release_phy_lock(bp);
10795                 bnx2x_link_report(bp);
10796
10797         } else if (eeprom->magic == 0x50485952) {
10798                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10799                 if (bp->state == BNX2X_STATE_OPEN) {
10800                         bnx2x_acquire_phy_lock(bp);
10801                         rc |= bnx2x_link_reset(&bp->link_params,
10802                                                &bp->link_vars, 1);
10803
10804                         rc |= bnx2x_phy_init(&bp->link_params,
10805                                              &bp->link_vars);
10806                         bnx2x_release_phy_lock(bp);
10807                         bnx2x_calc_fc_adv(bp);
10808                 }
10809         } else if (eeprom->magic == 0x53985943) {
10784                 /* 'PHYC' (0x53985943): PHY FW upgrade completed; note ASCII 'PHYC' is 0x50485943 */
10811                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10812                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10813                         u8 ext_phy_addr =
10814                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10815
10816                         /* DSP Remove Download Mode */
10817                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10818                                        MISC_REGISTERS_GPIO_LOW, port);
10819
10820                         bnx2x_acquire_phy_lock(bp);
10821
10822                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10823
10824                         /* wait 0.5 sec to allow it to run */
10825                         msleep(500);
10826                         bnx2x_ext_phy_hw_reset(bp, port);
10827                         msleep(500);
10828                         bnx2x_release_phy_lock(bp);
10829                 }
10830         } else
10831                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10832
10833         return rc;
10834 }
10835
10836 static int bnx2x_get_coalesce(struct net_device *dev,
10837                               struct ethtool_coalesce *coal)
10838 {
10839         struct bnx2x *bp = netdev_priv(dev);
10840
10841         memset(coal, 0, sizeof(struct ethtool_coalesce));
10842
10843         coal->rx_coalesce_usecs = bp->rx_ticks;
10844         coal->tx_coalesce_usecs = bp->tx_ticks;
10845
10846         return 0;
10847 }
10848
10849 static int bnx2x_set_coalesce(struct net_device *dev,
10850                               struct ethtool_coalesce *coal)
10851 {
10852         struct bnx2x *bp = netdev_priv(dev);
10853
10854         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10855         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10856                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10857
10858         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10859         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10860                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10861
10862         if (netif_running(dev))
10863                 bnx2x_update_coalesce(bp);
10864
10865         return 0;
10866 }
10867
10868 static void bnx2x_get_ringparam(struct net_device *dev,
10869                                 struct ethtool_ringparam *ering)
10870 {
10871         struct bnx2x *bp = netdev_priv(dev);
10872
10873         ering->rx_max_pending = MAX_RX_AVAIL;
10874         ering->rx_mini_max_pending = 0;
10875         ering->rx_jumbo_max_pending = 0;
10876
10877         ering->rx_pending = bp->rx_ring_size;
10878         ering->rx_mini_pending = 0;
10879         ering->rx_jumbo_pending = 0;
10880
10881         ering->tx_max_pending = MAX_TX_AVAIL;
10882         ering->tx_pending = bp->tx_ring_size;
10883 }
10884
10885 static int bnx2x_set_ringparam(struct net_device *dev,
10886                                struct ethtool_ringparam *ering)
10887 {
10888         struct bnx2x *bp = netdev_priv(dev);
10889         int rc = 0;
10890
10891         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10892                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10893                 return -EAGAIN;
10894         }
10895
10896         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10897             (ering->tx_pending > MAX_TX_AVAIL) ||
10898             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10899                 return -EINVAL;
10900
10901         bp->rx_ring_size = ering->rx_pending;
10902         bp->tx_ring_size = ering->tx_pending;
10903
10904         if (netif_running(dev)) {
10905                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10906                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10907         }
10908
10909         return rc;
10910 }
10911
10912 static void bnx2x_get_pauseparam(struct net_device *dev,
10913                                  struct ethtool_pauseparam *epause)
10914 {
10915         struct bnx2x *bp = netdev_priv(dev);
10916
10917         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10918                            BNX2X_FLOW_CTRL_AUTO) &&
10919                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10920
10921         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10922                             BNX2X_FLOW_CTRL_RX);
10923         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10924                             BNX2X_FLOW_CTRL_TX);
10925
10926         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10927            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10928            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10929 }
10930
10931 static int bnx2x_set_pauseparam(struct net_device *dev,
10932                                 struct ethtool_pauseparam *epause)
10933 {
10934         struct bnx2x *bp = netdev_priv(dev);
10935
10936         if (IS_E1HMF(bp))
10937                 return 0;
10938
10939         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10940            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10941            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10942
10943         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10944
10945         if (epause->rx_pause)
10946                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10947
10948         if (epause->tx_pause)
10949                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10950
10951         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10952                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10953
10954         if (epause->autoneg) {
10955                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10956                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10957                         return -EINVAL;
10958                 }
10959
10960                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10961                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10962         }
10963
10964         DP(NETIF_MSG_LINK,
10965            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10966
10967         if (netif_running(dev)) {
10968                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10969                 bnx2x_link_set(bp);
10970         }
10971
10972         return 0;
10973 }
10974
10975 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10976 {
10977         struct bnx2x *bp = netdev_priv(dev);
10978         int changed = 0;
10979         int rc = 0;
10980
10981         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10982                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10983                 return -EAGAIN;
10984         }
10985
10986         /* TPA requires Rx CSUM offloading */
10987         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10988                 if (!disable_tpa) {
10989                         if (!(dev->features & NETIF_F_LRO)) {
10990                                 dev->features |= NETIF_F_LRO;
10991                                 bp->flags |= TPA_ENABLE_FLAG;
10992                                 changed = 1;
10993                         }
10994                 } else
10995                         rc = -EINVAL;
10996         } else if (dev->features & NETIF_F_LRO) {
10997                 dev->features &= ~NETIF_F_LRO;
10998                 bp->flags &= ~TPA_ENABLE_FLAG;
10999                 changed = 1;
11000         }
11001
11002         if (changed && netif_running(dev)) {
11003                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11004                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11005         }
11006
11007         return rc;
11008 }
11009
11010 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11011 {
11012         struct bnx2x *bp = netdev_priv(dev);
11013
11014         return bp->rx_csum;
11015 }
11016
11017 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11018 {
11019         struct bnx2x *bp = netdev_priv(dev);
11020         int rc = 0;
11021
11022         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11023                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11024                 return -EAGAIN;
11025         }
11026
11027         bp->rx_csum = data;
11028
11029         /* Disable TPA when Rx CSUM is disabled; otherwise all
11030            TPA'ed packets will be discarded due to a wrong TCP CSUM */
11031         if (!data) {
11032                 u32 flags = ethtool_op_get_flags(dev);
11033
11034                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11035         }
11036
11037         return rc;
11038 }
11039
11040 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11041 {
11042         if (data) {
11043                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11044                 dev->features |= NETIF_F_TSO6;
11045         } else {
11046                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11047                 dev->features &= ~NETIF_F_TSO6;
11048         }
11049
11050         return 0;
11051 }
11052
11053 static const struct {
11054         char string[ETH_GSTRING_LEN];
11055 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11056         { "register_test (offline)" },
11057         { "memory_test (offline)" },
11058         { "loopback_test (offline)" },
11059         { "nvram_test (online)" },
11060         { "interrupt_test (online)" },
11061         { "link_test (online)" },
11062         { "idle check (online)" }
11063 };
11064
11065 static int bnx2x_test_registers(struct bnx2x *bp)
11066 {
11067         int idx, i, rc = -ENODEV;
11068         u32 wr_val = 0;
11069         int port = BP_PORT(bp);
11070         static const struct {
11071                 u32 offset0;
11072                 u32 offset1;
11073                 u32 mask;
11074         } reg_tbl[] = {
11075 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11076                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11077                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11078                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11079                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11080                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11081                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11082                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11083                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11084                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11085 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11086                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11087                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11088                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11089                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11090                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11091                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11092                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11093                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11094                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11095 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11096                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11097                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11098                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11099                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11100                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11101                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11102                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11103                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11104                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11105 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11106                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11107                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11108                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11109                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11110                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11111                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11112
11113                 { 0xffffffff, 0, 0x00000000 }
11114         };
11115
11116         if (!netif_running(bp->dev))
11117                 return rc;
11118
11119         /* Run the test twice:
11120            first writing 0x00000000, then writing 0xffffffff */
11121         for (idx = 0; idx < 2; idx++) {
11122
11123                 switch (idx) {
11124                 case 0:
11125                         wr_val = 0;
11126                         break;
11127                 case 1:
11128                         wr_val = 0xffffffff;
11129                         break;
11130                 }
11131
11132                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11133                         u32 offset, mask, save_val, val;
11134
11135                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11136                         mask = reg_tbl[i].mask;
11137
11138                         save_val = REG_RD(bp, offset);
11139
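                              /* write only the masked bits; bits outside
                               * the mask may be read-only or parity
                               * protected, so leave them untouched
                               */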
11140                         REG_WR(bp, offset, (wr_val & mask));
11141                         val = REG_RD(bp, offset);
11142
11143                         /* Restore the original register's value */
11144                         REG_WR(bp, offset, save_val);
11145
11146                         /* verify value is as expected */
11147                         if ((val & mask) != (wr_val & mask)) {
11148                                 DP(NETIF_MSG_PROBE,
11149                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11150                                    offset, val, wr_val, mask);
11151                                 goto test_reg_exit;
11152                         }
11153                 }
11154         }
11155
11156         rc = 0;
11157
11158 test_reg_exit:
11159         return rc;
11160 }
11161
11162 static int bnx2x_test_memory(struct bnx2x *bp)
11163 {
11164         int i, j, rc = -ENODEV;
11165         u32 val;
11166         static const struct {
11167                 u32 offset;
11168                 int size;
11169         } mem_tbl[] = {
11170                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11171                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11172                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11173                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11174                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11175                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11176                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11177
11178                 { 0xffffffff, 0 }
11179         };
11180         static const struct {
11181                 char *name;
11182                 u32 offset;
11183                 u32 e1_mask;
11184                 u32 e1h_mask;
11185         } prty_tbl[] = {
11186                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11187                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11188                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11189                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11190                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11191                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11192
11193                 { NULL, 0xffffffff, 0, 0 }
11194         };
11195
11196         if (!netif_running(bp->dev))
11197                 return rc;
11198
11199         /* Go through all the memories */
11200         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11201                 for (j = 0; j < mem_tbl[i].size; j++)
11202                         REG_RD(bp, mem_tbl[i].offset + j*4);
11203
11204         /* Check the parity status */
11205         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11206                 val = REG_RD(bp, prty_tbl[i].offset);
11207                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11208                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11209                         DP(NETIF_MSG_HW,
11210                            "%s is 0x%x\n", prty_tbl[i].name, val);
11211                         goto test_mem_exit;
11212                 }
11213         }
11214
11215         rc = 0;
11216
11217 test_mem_exit:
11218         return rc;
11219 }
11220
11221 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11222 {
11223         int cnt = 1000;
11224
11225         if (link_up)
11226                 while (bnx2x_link_test(bp) && cnt--)
11227                         msleep(10);
11228 }
11229
11230 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11231 {
11232         unsigned int pkt_size, num_pkts, i;
11233         struct sk_buff *skb;
11234         unsigned char *packet;
11235         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11236         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11237         u16 tx_start_idx, tx_idx;
11238         u16 rx_start_idx, rx_idx;
11239         u16 pkt_prod, bd_prod;
11240         struct sw_tx_bd *tx_buf;
11241         struct eth_tx_start_bd *tx_start_bd;
11242         struct eth_tx_parse_bd *pbd = NULL;
11243         dma_addr_t mapping;
11244         union eth_rx_cqe *cqe;
11245         u8 cqe_fp_flags;
11246         struct sw_rx_bd *rx_buf;
11247         u16 len;
11248         int rc = -ENODEV;
11249
11250         /* check the loopback mode */
11251         switch (loopback_mode) {
11252         case BNX2X_PHY_LOOPBACK:
11253                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11254                         return -EINVAL;
11255                 break;
11256         case BNX2X_MAC_LOOPBACK:
11257                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11258                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11259                 break;
11260         default:
11261                 return -EINVAL;
11262         }
11263
11264         /* prepare the loopback packet */
11265         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11266                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11267         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11268         if (!skb) {
11269                 rc = -ENOMEM;
11270                 goto test_loopback_exit;
11271         }
11272         packet = skb_put(skb, pkt_size);
11273         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11274         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11275         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
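              /* each payload byte carries its own offset (i & 0xff) so
               * the receive side can verify the frame byte by byte
               */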
11276         for (i = ETH_HLEN; i < pkt_size; i++)
11277                 packet[i] = (unsigned char) (i & 0xff);
11278
11279         /* send the loopback packet */
11280         num_pkts = 0;
11281         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11282         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11283
11284         pkt_prod = fp_tx->tx_pkt_prod++;
11285         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11286         tx_buf->first_bd = fp_tx->tx_bd_prod;
11287         tx_buf->skb = skb;
11288         tx_buf->flags = 0;
11289
11290         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11291         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11292         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11293                                  skb_headlen(skb), DMA_TO_DEVICE);
11294         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11295         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11296         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11297         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11298         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11299         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11300         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11301                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11302
11303         /* turn on parsing and get a BD */
11304         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11305         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11306
11307         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11308
11309         wmb();
11310
11311         fp_tx->tx_db.data.prod += 2;
11312         barrier();
11313         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11314
11315         mmiowb();
11316
11317         num_pkts++;
11318         fp_tx->tx_bd_prod += 2; /* start + pbd */
11319
11320         udelay(100);
11321
11322         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11323         if (tx_idx != tx_start_idx + num_pkts)
11324                 goto test_loopback_exit;
11325
11326         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11327         if (rx_idx != rx_start_idx + num_pkts)
11328                 goto test_loopback_exit;
11329
11330         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11331         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
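              /* "FALGS" is the spelling used by the firmware HSI headers */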
11332         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11333                 goto test_loopback_rx_exit;
11334
11335         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11336         if (len != pkt_size)
11337                 goto test_loopback_rx_exit;
11338
11339         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11340         skb = rx_buf->skb;
11341         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11342         for (i = ETH_HLEN; i < pkt_size; i++)
11343                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11344                         goto test_loopback_rx_exit;
11345
11346         rc = 0;
11347
11348 test_loopback_rx_exit:
11349
11350         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11351         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11352         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11353         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11354
11355         /* Update producers */
11356         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11357                              fp_rx->rx_sge_prod);
11358
11359 test_loopback_exit:
11360         bp->link_params.loopback_mode = LOOPBACK_NONE;
11361
11362         return rc;
11363 }
11364
11365 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11366 {
11367         int rc = 0, res;
11368
11369         if (BP_NOMCP(bp))
11370                 return rc;
11371
11372         if (!netif_running(bp->dev))
11373                 return BNX2X_LOOPBACK_FAILED;
11374
11375         bnx2x_netif_stop(bp, 1);
11376         bnx2x_acquire_phy_lock(bp);
11377
11378         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11379         if (res) {
11380                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11381                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11382         }
11383
11384         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11385         if (res) {
11386                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11387                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11388         }
11389
11390         bnx2x_release_phy_lock(bp);
11391         bnx2x_netif_start(bp);
11392
11393         return rc;
11394 }
11395
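      /* a CRC32 computed over a region that ends with its own CRC always
       * leaves this well-known residual, so each region below can be
       * validated without knowing where its CRC field is stored
       */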
11396 #define CRC32_RESIDUAL                  0xdebb20e3
11397
11398 static int bnx2x_test_nvram(struct bnx2x *bp)
11399 {
11400         static const struct {
11401                 int offset;
11402                 int size;
11403         } nvram_tbl[] = {
11404                 {     0,  0x14 }, /* bootstrap */
11405                 {  0x14,  0xec }, /* dir */
11406                 { 0x100, 0x350 }, /* manuf_info */
11407                 { 0x450,  0xf0 }, /* feature_info */
11408                 { 0x640,  0x64 }, /* upgrade_key_info */
11409                 { 0x6a4,  0x64 },
11410                 { 0x708,  0x70 }, /* manuf_key_info */
11411                 { 0x778,  0x70 },
11412                 {     0,     0 }
11413         };
11414         __be32 buf[0x350 / 4];
11415         u8 *data = (u8 *)buf;
11416         int i, rc;
11417         u32 magic, crc;
11418
11419         if (BP_NOMCP(bp))
11420                 return 0;
11421
11422         rc = bnx2x_nvram_read(bp, 0, data, 4);
11423         if (rc) {
11424                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11425                 goto test_nvram_exit;
11426         }
11427
11428         magic = be32_to_cpu(buf[0]);
11429         if (magic != 0x669955aa) {
11430                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11431                 rc = -ENODEV;
11432                 goto test_nvram_exit;
11433         }
11434
11435         for (i = 0; nvram_tbl[i].size; i++) {
11436
11437                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11438                                       nvram_tbl[i].size);
11439                 if (rc) {
11440                         DP(NETIF_MSG_PROBE,
11441                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11442                         goto test_nvram_exit;
11443                 }
11444
11445                 crc = ether_crc_le(nvram_tbl[i].size, data);
11446                 if (crc != CRC32_RESIDUAL) {
11447                         DP(NETIF_MSG_PROBE,
11448                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11449                         rc = -ENODEV;
11450                         goto test_nvram_exit;
11451                 }
11452         }
11453
11454 test_nvram_exit:
11455         return rc;
11456 }
11457
11458 static int bnx2x_test_intr(struct bnx2x *bp)
11459 {
11460         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11461         int i, rc;
11462
11463         if (!netif_running(bp->dev))
11464                 return -ENODEV;
11465
11466         config->hdr.length = 0;
11467         if (CHIP_IS_E1(bp))
11468                 /* use last unicast entries */
11469                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11470         else
11471                 config->hdr.offset = BP_FUNC(bp);
11472         config->hdr.client_id = bp->fp->cl_id;
11473         config->hdr.reserved1 = 0;
11474
11475         bp->set_mac_pending++;
11476         smp_wmb();
11477         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11478                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11479                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11480         if (rc == 0) {
11481                 for (i = 0; i < 10; i++) {
11482                         if (!bp->set_mac_pending)
11483                                 break;
11484                         smp_rmb();
11485                         msleep_interruptible(10);
11486                 }
11487                 if (i == 10)
11488                         rc = -ENODEV;
11489         }
11490
11491         return rc;
11492 }
11493
11494 static void bnx2x_self_test(struct net_device *dev,
11495                             struct ethtool_test *etest, u64 *buf)
11496 {
11497         struct bnx2x *bp = netdev_priv(dev);
11498
11499         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11500                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11501                 etest->flags |= ETH_TEST_FL_FAILED;
11502                 return;
11503         }
11504
11505         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11506
11507         if (!netif_running(dev))
11508                 return;
11509
11510         /* offline tests are not supported in MF mode */
11511         if (IS_E1HMF(bp))
11512                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11513
11514         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11515                 int port = BP_PORT(bp);
11516                 u32 val;
11517                 u8 link_up;
11518
11519                 /* save current value of input enable for TX port IF */
11520                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11521                 /* disable input for TX port IF */
11522                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11523
11524                 link_up = (bnx2x_link_test(bp) == 0);
11525                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11526                 bnx2x_nic_load(bp, LOAD_DIAG);
11527                 /* wait until link state is restored */
11528                 bnx2x_wait_for_link(bp, link_up);
11529
11530                 if (bnx2x_test_registers(bp) != 0) {
11531                         buf[0] = 1;
11532                         etest->flags |= ETH_TEST_FL_FAILED;
11533                 }
11534                 if (bnx2x_test_memory(bp) != 0) {
11535                         buf[1] = 1;
11536                         etest->flags |= ETH_TEST_FL_FAILED;
11537                 }
11538                 buf[2] = bnx2x_test_loopback(bp, link_up);
11539                 if (buf[2] != 0)
11540                         etest->flags |= ETH_TEST_FL_FAILED;
11541
11542                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11543
11544                 /* restore input for TX port IF */
11545                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11546
11547                 bnx2x_nic_load(bp, LOAD_NORMAL);
11548                 /* wait until link state is restored */
11549                 bnx2x_wait_for_link(bp, link_up);
11550         }
11551         if (bnx2x_test_nvram(bp) != 0) {
11552                 buf[3] = 1;
11553                 etest->flags |= ETH_TEST_FL_FAILED;
11554         }
11555         if (bnx2x_test_intr(bp) != 0) {
11556                 buf[4] = 1;
11557                 etest->flags |= ETH_TEST_FL_FAILED;
11558         }
11559         if (bp->port.pmf)
11560                 if (bnx2x_link_test(bp) != 0) {
11561                         buf[5] = 1;
11562                         etest->flags |= ETH_TEST_FL_FAILED;
11563                 }
11564
11565 #ifdef BNX2X_EXTRA_DEBUG
11566         bnx2x_panic_dump(bp);
11567 #endif
11568 }
11569
11570 static const struct {
11571         long offset;
11572         int size;
11573         u8 string[ETH_GSTRING_LEN];
11574 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11575 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11576         { Q_STATS_OFFSET32(error_bytes_received_hi),
11577                                                 8, "[%d]: rx_error_bytes" },
11578         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11579                                                 8, "[%d]: rx_ucast_packets" },
11580         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11581                                                 8, "[%d]: rx_mcast_packets" },
11582         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11583                                                 8, "[%d]: rx_bcast_packets" },
11584         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11585         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11586                                          4, "[%d]: rx_phy_ip_err_discards"},
11587         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11588                                          4, "[%d]: rx_skb_alloc_discard" },
11589         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11590
11591 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11592         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11593                                                 8, "[%d]: tx_ucast_packets" },
11594         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11595                                                 8, "[%d]: tx_mcast_packets" },
11596         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11597                                                 8, "[%d]: tx_bcast_packets" }
11598 };
11599
11600 static const struct {
11601         long offset;
11602         int size;
11603         u32 flags;
11604 #define STATS_FLAGS_PORT                1
11605 #define STATS_FLAGS_FUNC                2
11606 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11607         u8 string[ETH_GSTRING_LEN];
11608 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11609 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11610                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11611         { STATS_OFFSET32(error_bytes_received_hi),
11612                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11613         { STATS_OFFSET32(total_unicast_packets_received_hi),
11614                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11615         { STATS_OFFSET32(total_multicast_packets_received_hi),
11616                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11617         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11618                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11619         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11620                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11621         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11622                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11623         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11624                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11625         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11626                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11627 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11628                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11629         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11630                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11631         { STATS_OFFSET32(no_buff_discard_hi),
11632                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11633         { STATS_OFFSET32(mac_filter_discard),
11634                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11635         { STATS_OFFSET32(xxoverflow_discard),
11636                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11637         { STATS_OFFSET32(brb_drop_hi),
11638                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11639         { STATS_OFFSET32(brb_truncate_hi),
11640                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11641         { STATS_OFFSET32(pause_frames_received_hi),
11642                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11643         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11644                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11645         { STATS_OFFSET32(nig_timer_max),
11646                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11647 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11648                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11649         { STATS_OFFSET32(rx_skb_alloc_failed),
11650                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11651         { STATS_OFFSET32(hw_csum_err),
11652                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11653
11654         { STATS_OFFSET32(total_bytes_transmitted_hi),
11655                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11656         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11657                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11658         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11659                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11660         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11661                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11662         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11663                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11664         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11665                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11666         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11667                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11668 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11669                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11670         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11671                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11672         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11673                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11674         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11675                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11676         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11677                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11678         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11679                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11680         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11681                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11682         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11683                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11684         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11685                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11686         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11687                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11688 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11689                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11690         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11691                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11692         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11693                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11694         { STATS_OFFSET32(pause_frames_sent_hi),
11695                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11696 };
11697
11698 #define IS_PORT_STAT(i) \
11699         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11700 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11701 #define IS_E1HMF_MODE_STAT(bp) \
11702                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11703
11704 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11705 {
11706         struct bnx2x *bp = netdev_priv(dev);
11707         int i, num_stats;
11708
11709         switch (stringset) {
11710         case ETH_SS_STATS:
11711                 if (is_multi(bp)) {
11712                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11713                         if (!IS_E1HMF_MODE_STAT(bp))
11714                                 num_stats += BNX2X_NUM_STATS;
11715                 } else {
11716                         if (IS_E1HMF_MODE_STAT(bp)) {
11717                                 num_stats = 0;
11718                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11719                                         if (IS_FUNC_STAT(i))
11720                                                 num_stats++;
11721                         } else
11722                                 num_stats = BNX2X_NUM_STATS;
11723                 }
11724                 return num_stats;
11725
11726         case ETH_SS_TEST:
11727                 return BNX2X_NUM_TESTS;
11728
11729         default:
11730                 return -EINVAL;
11731         }
11732 }
11733
11734 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11735 {
11736         struct bnx2x *bp = netdev_priv(dev);
11737         int i, j, k;
11738
11739         switch (stringset) {
11740         case ETH_SS_STATS:
11741                 if (is_multi(bp)) {
11742                         k = 0;
11743                         for_each_queue(bp, i) {
11744                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11745                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11746                                                 bnx2x_q_stats_arr[j].string, i);
11747                                 k += BNX2X_NUM_Q_STATS;
11748                         }
11749                         if (IS_E1HMF_MODE_STAT(bp))
11750                                 break;
11751                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11752                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11753                                        bnx2x_stats_arr[j].string);
11754                 } else {
11755                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11756                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11757                                         continue;
11758                                 strcpy(buf + j*ETH_GSTRING_LEN,
11759                                        bnx2x_stats_arr[i].string);
11760                                 j++;
11761                         }
11762                 }
11763                 break;
11764
11765         case ETH_SS_TEST:
11766                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11767                 break;
11768         }
11769 }
11770
11771 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11772                                     struct ethtool_stats *stats, u64 *buf)
11773 {
11774         struct bnx2x *bp = netdev_priv(dev);
11775         u32 *hw_stats, *offset;
11776         int i, j, k;
11777
11778         if (is_multi(bp)) {
11779                 k = 0;
11780                 for_each_queue(bp, i) {
11781                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11782                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11783                                 if (bnx2x_q_stats_arr[j].size == 0) {
11784                                         /* skip this counter */
11785                                         buf[k + j] = 0;
11786                                         continue;
11787                                 }
11788                                 offset = (hw_stats +
11789                                           bnx2x_q_stats_arr[j].offset);
11790                                 if (bnx2x_q_stats_arr[j].size == 4) {
11791                                         /* 4-byte counter */
11792                                         buf[k + j] = (u64) *offset;
11793                                         continue;
11794                                 }
11795                                 /* 8-byte counter */
11796                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11797                         }
11798                         k += BNX2X_NUM_Q_STATS;
11799                 }
11800                 if (IS_E1HMF_MODE_STAT(bp))
11801                         return;
11802                 hw_stats = (u32 *)&bp->eth_stats;
11803                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11804                         if (bnx2x_stats_arr[j].size == 0) {
11805                                 /* skip this counter */
11806                                 buf[k + j] = 0;
11807                                 continue;
11808                         }
11809                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11810                         if (bnx2x_stats_arr[j].size == 4) {
11811                                 /* 4-byte counter */
11812                                 buf[k + j] = (u64) *offset;
11813                                 continue;
11814                         }
11815                         /* 8-byte counter */
11816                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11817                 }
11818         } else {
11819                 hw_stats = (u32 *)&bp->eth_stats;
11820                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11821                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11822                                 continue;
11823                         if (bnx2x_stats_arr[i].size == 0) {
11824                                 /* skip this counter */
11825                                 buf[j] = 0;
11826                                 j++;
11827                                 continue;
11828                         }
11829                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11830                         if (bnx2x_stats_arr[i].size == 4) {
11831                                 /* 4-byte counter */
11832                                 buf[j] = (u64) *offset;
11833                                 j++;
11834                                 continue;
11835                         }
11836                         /* 8-byte counter */
11837                         buf[j] = HILO_U64(*offset, *(offset + 1));
11838                         j++;
11839                 }
11840         }
11841 }
11842
11843 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11844 {
11845         struct bnx2x *bp = netdev_priv(dev);
11846         int i;
11847
11848         if (!netif_running(dev))
11849                 return 0;
11850
11851         if (!bp->port.pmf)
11852                 return 0;
11853
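              /* treat an unspecified (zero) duration as a 2 second blink */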
11854         if (data == 0)
11855                 data = 2;
11856
11857         for (i = 0; i < (data * 2); i++) {
11858                 if ((i % 2) == 0)
11859                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11860                                       SPEED_1000);
11861                 else
11862                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11863
11864                 msleep_interruptible(500);
11865                 if (signal_pending(current))
11866                         break;
11867         }
11868
11869         if (bp->link_vars.link_up)
11870                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11871                               bp->link_vars.line_speed);
11872
11873         return 0;
11874 }
11875
11876 static const struct ethtool_ops bnx2x_ethtool_ops = {
11877         .get_settings           = bnx2x_get_settings,
11878         .set_settings           = bnx2x_set_settings,
11879         .get_drvinfo            = bnx2x_get_drvinfo,
11880         .get_regs_len           = bnx2x_get_regs_len,
11881         .get_regs               = bnx2x_get_regs,
11882         .get_wol                = bnx2x_get_wol,
11883         .set_wol                = bnx2x_set_wol,
11884         .get_msglevel           = bnx2x_get_msglevel,
11885         .set_msglevel           = bnx2x_set_msglevel,
11886         .nway_reset             = bnx2x_nway_reset,
11887         .get_link               = bnx2x_get_link,
11888         .get_eeprom_len         = bnx2x_get_eeprom_len,
11889         .get_eeprom             = bnx2x_get_eeprom,
11890         .set_eeprom             = bnx2x_set_eeprom,
11891         .get_coalesce           = bnx2x_get_coalesce,
11892         .set_coalesce           = bnx2x_set_coalesce,
11893         .get_ringparam          = bnx2x_get_ringparam,
11894         .set_ringparam          = bnx2x_set_ringparam,
11895         .get_pauseparam         = bnx2x_get_pauseparam,
11896         .set_pauseparam         = bnx2x_set_pauseparam,
11897         .get_rx_csum            = bnx2x_get_rx_csum,
11898         .set_rx_csum            = bnx2x_set_rx_csum,
11899         .get_tx_csum            = ethtool_op_get_tx_csum,
11900         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11901         .set_flags              = bnx2x_set_flags,
11902         .get_flags              = ethtool_op_get_flags,
11903         .get_sg                 = ethtool_op_get_sg,
11904         .set_sg                 = ethtool_op_set_sg,
11905         .get_tso                = ethtool_op_get_tso,
11906         .set_tso                = bnx2x_set_tso,
11907         .self_test              = bnx2x_self_test,
11908         .get_sset_count         = bnx2x_get_sset_count,
11909         .get_strings            = bnx2x_get_strings,
11910         .phys_id                = bnx2x_phys_id,
11911         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11912 };
11913
11914 /* end of ethtool_ops */
11915
11916 /****************************************************************************
11917 * General service functions
11918 ****************************************************************************/
11919
11920 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11921 {
11922         u16 pmcsr;
11923
11924         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11925
11926         switch (state) {
11927         case PCI_D0:
11928                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11929                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11930                                        PCI_PM_CTRL_PME_STATUS));
11931
11932                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11933                         /* delay required during transition out of D3hot */
11934                         msleep(20);
11935                 break;
11936
11937         case PCI_D3hot:
11938                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
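                      /* 3 in the PowerState field selects D3hot */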
11939                 pmcsr |= 3;
11940
11941                 if (bp->wol)
11942                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11943
11944                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11945                                       pmcsr);
11946
11947                 /* No more memory access after this point until
11948                  * the device is brought back to D0.
11949                  */
11950                 break;
11951
11952         default:
11953                 return -EINVAL;
11954         }
11955         return 0;
11956 }
11957
11958 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11959 {
11960         u16 rx_cons_sb;
11961
11962         /* Tell compiler that status block fields can change */
11963         barrier();
11964         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
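              /* the last entry on each RCQ page is a "next page" element,
               * so step over it when the consumer index lands there
               */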
11965         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11966                 rx_cons_sb++;
11967         return (fp->rx_comp_cons != rx_cons_sb);
11968 }
11969
11970 /*
11971  * net_device service functions
11972  */
11973
11974 static int bnx2x_poll(struct napi_struct *napi, int budget)
11975 {
11976         int work_done = 0;
11977         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11978                                                  napi);
11979         struct bnx2x *bp = fp->bp;
11980
11981         while (1) {
11982 #ifdef BNX2X_STOP_ON_ERROR
11983                 if (unlikely(bp->panic)) {
11984                         napi_complete(napi);
11985                         return 0;
11986                 }
11987 #endif
11988
11989                 if (bnx2x_has_tx_work(fp))
11990                         bnx2x_tx_int(fp);
11991
11992                 if (bnx2x_has_rx_work(fp)) {
11993                         work_done += bnx2x_rx_int(fp, budget - work_done);
11994
11995                         /* must not complete if we consumed full budget */
11996                         if (work_done >= budget)
11997                                 break;
11998                 }
11999
12000                 /* Fall out from the NAPI loop if needed */
12001                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12002                         bnx2x_update_fpsb_idx(fp);
12003                 /* bnx2x_has_rx_work() reads the status block, thus we need
12004                  * to ensure that status block indices have been actually read
12005                  * (bnx2x_update_fpsb_idx) prior to this check
12006                  * (bnx2x_has_rx_work) so that we won't write the "newer"
12007                  * value of the status block to IGU (if there was a DMA right
12008                  * after bnx2x_has_rx_work and if there is no rmb, the memory
12009                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
12010                  * before bnx2x_ack_sb). In this case there will never be
12011                  * another interrupt until there is another update of the
12012                  * status block, while there is still unhandled work.
12013                  */
12014                         rmb();
12015
12016                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12017                                 napi_complete(napi);
12018                                 /* Re-enable interrupts */
12019                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12020                                              le16_to_cpu(fp->fp_c_idx),
12021                                              IGU_INT_NOP, 1);
12022                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12023                                              le16_to_cpu(fp->fp_u_idx),
12024                                              IGU_INT_ENABLE, 1);
12025                                 break;
12026                         }
12027                 }
12028         }
12029
12030         return work_done;
12031 }
12032
12033
12034 /* We split the first BD into a headers BD and a data BD
12035  * to ease the pain of our fellow microcode engineers;
12036  * we use one mapping for both BDs.
12037  * So far this has only been observed to happen
12038  * in Other Operating Systems(TM)
12039  */
12040 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12041                                    struct bnx2x_fastpath *fp,
12042                                    struct sw_tx_bd *tx_buf,
12043                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12044                                    u16 bd_prod, int nbd)
12045 {
12046         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12047         struct eth_tx_bd *d_tx_bd;
12048         dma_addr_t mapping;
12049         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12050
12051         /* first fix first BD */
12052         h_tx_bd->nbd = cpu_to_le16(nbd);
12053         h_tx_bd->nbytes = cpu_to_le16(hlen);
12054
12055         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12056            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12057            h_tx_bd->addr_lo, h_tx_bd->nbd);
12058
12059         /* now get a new data BD
12060          * (after the pbd) and fill it */
12061         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12062         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12063
12064         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12065                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12066
12067         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12068         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12069         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12070
12071         /* this marks the BD as one that has no individual mapping */
12072         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12073
12074         DP(NETIF_MSG_TX_QUEUED,
12075            "TSO split data size is %d (%x:%x)\n",
12076            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12077
12078         /* update tx_bd */
12079         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12080
12081         return bd_prod;
12082 }
12083
12084 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12085 {
12086         if (fix > 0)
12087                 csum = (u16) ~csum_fold(csum_sub(csum,
12088                                 csum_partial(t_header - fix, fix, 0)));
12089
12090         else if (fix < 0)
12091                 csum = (u16) ~csum_fold(csum_add(csum,
12092                                 csum_partial(t_header, -fix, 0)));
12093
12094         return swab16(csum);
12095 }
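
/* Usage sketch for bnx2x_csum_fix() (hypothetical helper, compiled out):
 * fold a partial checksum whose start lies `fix` bytes away from the
 * transport header back into a checksum taken from the transport header.
 */
#if 0
static u16 example_csum_fix(struct sk_buff *skb)
{
        s8 fix = SKB_CS_OFF(skb);       /* signed offset, may be negative */

        return bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb), fix);
}
#endif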
12096
12097 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12098 {
12099         u32 rc;
12100
12101         if (skb->ip_summed != CHECKSUM_PARTIAL)
12102                 rc = XMIT_PLAIN;
12103
12104         else {
12105                 if (skb->protocol == htons(ETH_P_IPV6)) {
12106                         rc = XMIT_CSUM_V6;
12107                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12108                                 rc |= XMIT_CSUM_TCP;
12109
12110                 } else {
12111                         rc = XMIT_CSUM_V4;
12112                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12113                                 rc |= XMIT_CSUM_TCP;
12114                 }
12115         }
12116
12117         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12118                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12119
12120         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12121                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12122
12123         return rc;
12124 }
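
/* For example, per the classification above: a CHECKSUM_PARTIAL IPv4 TCP
 * skb with SKB_GSO_TCPV4 set maps to
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4.
 */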
12125
12126 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12127 /* Check whether the packet requires linearization (i.e. it is too
12128    fragmented).  No need to check fragmentation if page size > 8K,
12129    since FW restrictions cannot be violated in that case. */
12130 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12131                              u32 xmit_type)
12132 {
12133         int to_copy = 0;
12134         int hlen = 0;
12135         int first_bd_sz = 0;
12136
12137         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12138         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12139
12140                 if (xmit_type & XMIT_GSO) {
12141                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12142                         /* Check if LSO packet needs to be copied:
12143                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12144                         int wnd_size = MAX_FETCH_BD - 3;
12145                         /* Number of windows to check */
12146                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12147                         int wnd_idx = 0;
12148                         int frag_idx = 0;
12149                         u32 wnd_sum = 0;
12150
12151                         /* Headers length */
12152                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12153                                 tcp_hdrlen(skb);
12154
12155                         /* Amount of data (w/o headers) in linear part of SKB */
12156                         first_bd_sz = skb_headlen(skb) - hlen;
12157
12158                         wnd_sum  = first_bd_sz;
12159
12160                         /* Calculate the first sum - it's special */
12161                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12162                                 wnd_sum +=
12163                                         skb_shinfo(skb)->frags[frag_idx].size;
12164
12165                         /* If there was data in the linear part of the skb - check it */
12166                         if (first_bd_sz > 0) {
12167                                 if (unlikely(wnd_sum < lso_mss)) {
12168                                         to_copy = 1;
12169                                         goto exit_lbl;
12170                                 }
12171
12172                                 wnd_sum -= first_bd_sz;
12173                         }
12174
12175                         /* Others are easier: run through the frag list and
12176                            check all windows */
12177                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12178                                 wnd_sum +=
12179                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12180
12181                                 if (unlikely(wnd_sum < lso_mss)) {
12182                                         to_copy = 1;
12183                                         break;
12184                                 }
12185                                 wnd_sum -=
12186                                         skb_shinfo(skb)->frags[wnd_idx].size;
12187                         }
12188                 } else {
12189                         /* a non-LSO packet that is too fragmented must
12190                            always be linearized */
12191                         to_copy = 1;
12192                 }
12193         }
12194
12195 exit_lbl:
12196         if (unlikely(to_copy))
12197                 DP(NETIF_MSG_TX_QUEUED,
12198                    "Linearization IS REQUIRED for %s packet. "
12199                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12200                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12201                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12202
12203         return to_copy;
12204 }
12205 #endif
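
/* Worked example for the window check above (hypothetical sizes, assuming
 * MAX_FETCH_BD == 13, i.e. wnd_size == 10): an LSO skb with lso_mss == 4000,
 * first_bd_sz == 100 and twelve 400-byte frags has a first window sum of
 * 100 + 9 * 400 = 3700 < 4000, so it must be linearized; with 500-byte
 * frags every window sums to at least 4600 and no copy is needed.
 */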
12206
12207 /* called with netif_tx_lock
12208  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12209  * netif_wake_queue()
12210  */
12211 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12212 {
12213         struct bnx2x *bp = netdev_priv(dev);
12214         struct bnx2x_fastpath *fp;
12215         struct netdev_queue *txq;
12216         struct sw_tx_bd *tx_buf;
12217         struct eth_tx_start_bd *tx_start_bd;
12218         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12219         struct eth_tx_parse_bd *pbd = NULL;
12220         u16 pkt_prod, bd_prod;
12221         int nbd, fp_index;
12222         dma_addr_t mapping;
12223         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12224         int i;
12225         u8 hlen = 0;
12226         __le16 pkt_size = 0;
12227         struct ethhdr *eth;
12228         u8 mac_type = UNICAST_ADDRESS;
12229
12230 #ifdef BNX2X_STOP_ON_ERROR
12231         if (unlikely(bp->panic))
12232                 return NETDEV_TX_BUSY;
12233 #endif
12234
12235         fp_index = skb_get_queue_mapping(skb);
12236         txq = netdev_get_tx_queue(dev, fp_index);
12237
12238         fp = &bp->fp[fp_index];
12239
12240         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12241                 fp->eth_q_stats.driver_xoff++;
12242                 netif_tx_stop_queue(txq);
12243                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12244                 return NETDEV_TX_BUSY;
12245         }
12246
12247         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12248            "  gso type %x  xmit_type %x\n",
12249            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12250            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12251
12252         eth = (struct ethhdr *)skb->data;
12253
12254         /* set flag according to packet type (UNICAST_ADDRESS is default) */
12255         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12256                 if (is_broadcast_ether_addr(eth->h_dest))
12257                         mac_type = BROADCAST_ADDRESS;
12258                 else
12259                         mac_type = MULTICAST_ADDRESS;
12260         }
12261
12262 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12263         /* First, check if we need to linearize the skb (due to FW
12264            restrictions). No need to check fragmentation if page size > 8K,
12265            since FW restrictions cannot be violated in that case. */
12266         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12267                 /* Statistics of linearization */
12268                 bp->lin_cnt++;
12269                 if (skb_linearize(skb) != 0) {
12270                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12271                            "silently dropping this SKB\n");
12272                         dev_kfree_skb_any(skb);
12273                         return NETDEV_TX_OK;
12274                 }
12275         }
12276 #endif
12277
12278         /*
12279         Please read carefully. First we use one BD which we mark as start,
12280         then we have a parsing info BD (used for TSO or xsum),
12281         and only then we have the rest of the TSO BDs.
12282         (don't forget to mark the last one as last,
12283         and to unmap only AFTER you write to the BD ...)
12284         And above all, all pbd sizes are in words - NOT DWORDS!
12285         */
12286
12287         pkt_prod = fp->tx_pkt_prod++;
12288         bd_prod = TX_BD(fp->tx_bd_prod);
12289
12290         /* get a tx_buf and first BD */
12291         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12292         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12293
12294         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12295         tx_start_bd->general_data =  (mac_type <<
12296                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12297         /* header nbd */
12298         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12299
12300         /* remember the first BD of the packet */
12301         tx_buf->first_bd = fp->tx_bd_prod;
12302         tx_buf->skb = skb;
12303         tx_buf->flags = 0;
12304
12305         DP(NETIF_MSG_TX_QUEUED,
12306            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12307            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12308
12309 #ifdef BCM_VLAN
12310         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12311             (bp->flags & HW_VLAN_TX_FLAG)) {
12312                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12313                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12314         } else
12315 #endif
12316                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12317
12318         /* turn on parsing and get a BD */
12319         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12320         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12321
12322         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12323
12324         if (xmit_type & XMIT_CSUM) {
12325                 hlen = (skb_network_header(skb) - skb->data) / 2;
12326
12327                 /* for now NS flag is not used in Linux */
12328                 pbd->global_data =
12329                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12330                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12331
12332                 pbd->ip_hlen = (skb_transport_header(skb) -
12333                                 skb_network_header(skb)) / 2;
12334
12335                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12336
12337                 pbd->total_hlen = cpu_to_le16(hlen);
12338                 hlen = hlen*2;
12339
12340                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12341
12342                 if (xmit_type & XMIT_CSUM_V4)
12343                         tx_start_bd->bd_flags.as_bitfield |=
12344                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12345                 else
12346                         tx_start_bd->bd_flags.as_bitfield |=
12347                                                 ETH_TX_BD_FLAGS_IPV6;
12348
12349                 if (xmit_type & XMIT_CSUM_TCP) {
12350                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12351
12352                 } else {
12353                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12354
12355                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12356
12357                         DP(NETIF_MSG_TX_QUEUED,
12358                            "hlen %d  fix %d  csum before fix %x\n",
12359                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12360
12361                         /* HW bug: fixup the CSUM */
12362                         pbd->tcp_pseudo_csum =
12363                                 bnx2x_csum_fix(skb_transport_header(skb),
12364                                                SKB_CS(skb), fix);
12365
12366                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12367                            pbd->tcp_pseudo_csum);
12368                 }
12369         }
12370
12371         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12372                                  skb_headlen(skb), DMA_TO_DEVICE);
12373
12374         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12375         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12376         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12377         tx_start_bd->nbd = cpu_to_le16(nbd);
12378         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12379         pkt_size = tx_start_bd->nbytes;
12380
12381         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12382            "  nbytes %d  flags %x  vlan %x\n",
12383            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12384            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12385            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12386
12387         if (xmit_type & XMIT_GSO) {
12388
12389                 DP(NETIF_MSG_TX_QUEUED,
12390                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12391                    skb->len, hlen, skb_headlen(skb),
12392                    skb_shinfo(skb)->gso_size);
12393
12394                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12395
12396                 if (unlikely(skb_headlen(skb) > hlen))
12397                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12398                                                  hlen, bd_prod, ++nbd);
12399
12400                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12401                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12402                 pbd->tcp_flags = pbd_tcp_flags(skb);
12403
12404                 if (xmit_type & XMIT_GSO_V4) {
12405                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12406                         pbd->tcp_pseudo_csum =
12407                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12408                                                           ip_hdr(skb)->daddr,
12409                                                           0, IPPROTO_TCP, 0));
12410
12411                 } else
12412                         pbd->tcp_pseudo_csum =
12413                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12414                                                         &ipv6_hdr(skb)->daddr,
12415                                                         0, IPPROTO_TCP, 0));
12416
12417                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12418         }
12419         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12420
12421         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12422                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12423
12424                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12425                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12426                 if (total_pkt_bd == NULL)
12427                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12428
12429                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12430                                        frag->page_offset,
12431                                        frag->size, DMA_TO_DEVICE);
12432
12433                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12434                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12435                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12436                 le16_add_cpu(&pkt_size, frag->size);
12437
12438                 DP(NETIF_MSG_TX_QUEUED,
12439                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12440                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12441                    le16_to_cpu(tx_data_bd->nbytes));
12442         }
12443
12444         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12445
12446         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12447
12448         /* now send a tx doorbell, counting the "next page" BD
12449          * if the packet contains or ends with it
12450          */
12451         if (TX_BD_POFF(bd_prod) < nbd)
12452                 nbd++;
12453
12454         if (total_pkt_bd != NULL)
12455                 total_pkt_bd->total_pkt_bytes = pkt_size;
12456
12457         if (pbd)
12458                 DP(NETIF_MSG_TX_QUEUED,
12459                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12460                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12461                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12462                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12463                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12464
12465         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12466
12467         /*
12468          * Make sure that the BD data is updated before updating the producer
12469          * since FW might read the BD right after the producer is updated.
12470          * This is only applicable for weak-ordered memory model archs such
12471          * as IA-64. The following barrier is also mandatory since FW
12472          * assumes packets always have BDs.
12473          */
12474         wmb();
12475
12476         fp->tx_db.data.prod += nbd;
12477         barrier();
12478         DOORBELL(bp, fp->index, fp->tx_db.raw);
12479
12480         mmiowb();
12481
12482         fp->tx_bd_prod += nbd;
12483
12484         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12485                 netif_tx_stop_queue(txq);
12486
12487                 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12488                  * ordering of set_bit() in netif_tx_stop_queue() and read of
12489                  * fp->tx_bd_cons */
12490                 smp_mb();
12491
12492                 fp->eth_q_stats.driver_xoff++;
12493                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12494                         netif_tx_wake_queue(txq);
12495         }
12496         fp->tx_pkt++;
12497
12498         return NETDEV_TX_OK;
12499 }
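
/* BD accounting sketch for the path above (hypothetical skb): a TSO skb
 * with 3 frags whose linear part exceeds its headers consumes
 * nbd = 2 (start BD + parsing BD) + 3 (frags) + 1 (split data BD from
 * bnx2x_tx_split()) = 6 BDs, plus one more in the doorbell count when the
 * packet spans the "next page" BD at the end of a descriptor page.
 */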
12500
12501 /* called with rtnl_lock */
12502 static int bnx2x_open(struct net_device *dev)
12503 {
12504         struct bnx2x *bp = netdev_priv(dev);
12505
12506         netif_carrier_off(dev);
12507
12508         bnx2x_set_power_state(bp, PCI_D0);
12509
12510         if (!bnx2x_reset_is_done(bp)) {
12511                 do {
12512                         /* Reset the MCP mailbox sequence if there is an
12513                          * ongoing recovery
12514                          */
12515                         bp->fw_seq = 0;
12516
12517                         /* If this is the first function to load and "reset
12518                          * done" is still not cleared, a recovery may be in
12519                          * progress. We don't check the attention state here
12520                          * because a "common" reset may already have cleared
12521                          * it, but we shall proceed with "process kill" anyway.
12522                          */
12523                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12524                                 bnx2x_trylock_hw_lock(bp,
12525                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12526                                 (!bnx2x_leader_reset(bp))) {
12527                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12528                                 break;
12529                         }
12530
12531                         bnx2x_set_power_state(bp, PCI_D3hot);
12532
12533                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12534                         " completed yet. Try again later. If you still see this"
12535                         " message after a few retries then a power cycle is"
12536                         " required.\n", bp->dev->name);
12537
12538                         return -EAGAIN;
12539                 } while (0);
12540         }
12541
12542         bp->recovery_state = BNX2X_RECOVERY_DONE;
12543
12544         return bnx2x_nic_load(bp, LOAD_OPEN);
12545 }
12546
12547 /* called with rtnl_lock */
12548 static int bnx2x_close(struct net_device *dev)
12549 {
12550         struct bnx2x *bp = netdev_priv(dev);
12551
12552         /* Unload the driver, release IRQs */
12553         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12554         if (atomic_read(&bp->pdev->enable_cnt) == 1)
12555                 if (!CHIP_REV_IS_SLOW(bp))
12556                         bnx2x_set_power_state(bp, PCI_D3hot);
12557
12558         return 0;
12559 }
12560
12561 /* called with netif_tx_lock from dev_mcast.c */
12562 static void bnx2x_set_rx_mode(struct net_device *dev)
12563 {
12564         struct bnx2x *bp = netdev_priv(dev);
12565         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12566         int port = BP_PORT(bp);
12567
12568         if (bp->state != BNX2X_STATE_OPEN) {
12569                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12570                 return;
12571         }
12572
12573         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12574
12575         if (dev->flags & IFF_PROMISC)
12576                 rx_mode = BNX2X_RX_MODE_PROMISC;
12577
12578         else if ((dev->flags & IFF_ALLMULTI) ||
12579                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12580                   CHIP_IS_E1(bp)))
12581                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12582
12583         else { /* some multicasts */
12584                 if (CHIP_IS_E1(bp)) {
12585                         int i, old, offset;
12586                         struct netdev_hw_addr *ha;
12587                         struct mac_configuration_cmd *config =
12588                                                 bnx2x_sp(bp, mcast_config);
12589
12590                         i = 0;
12591                         netdev_for_each_mc_addr(ha, dev) {
12592                                 config->config_table[i].
12593                                         cam_entry.msb_mac_addr =
12594                                         swab16(*(u16 *)&ha->addr[0]);
12595                                 config->config_table[i].
12596                                         cam_entry.middle_mac_addr =
12597                                         swab16(*(u16 *)&ha->addr[2]);
12598                                 config->config_table[i].
12599                                         cam_entry.lsb_mac_addr =
12600                                         swab16(*(u16 *)&ha->addr[4]);
12601                                 config->config_table[i].cam_entry.flags =
12602                                                         cpu_to_le16(port);
12603                                 config->config_table[i].
12604                                         target_table_entry.flags = 0;
12605                                 config->config_table[i].target_table_entry.
12606                                         clients_bit_vector =
12607                                                 cpu_to_le32(1 << BP_L_ID(bp));
12608                                 config->config_table[i].
12609                                         target_table_entry.vlan_id = 0;
12610
12611                                 DP(NETIF_MSG_IFUP,
12612                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12613                                    config->config_table[i].
12614                                                 cam_entry.msb_mac_addr,
12615                                    config->config_table[i].
12616                                                 cam_entry.middle_mac_addr,
12617                                    config->config_table[i].
12618                                                 cam_entry.lsb_mac_addr);
12619                                 i++;
12620                         }
12621                         old = config->hdr.length;
12622                         if (old > i) {
12623                                 for (; i < old; i++) {
12624                                         if (CAM_IS_INVALID(config->
12625                                                            config_table[i])) {
12626                                                 /* already invalidated */
12627                                                 break;
12628                                         }
12629                                         /* invalidate */
12630                                         CAM_INVALIDATE(config->
12631                                                        config_table[i]);
12632                                 }
12633                         }
12634
12635                         if (CHIP_REV_IS_SLOW(bp))
12636                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12637                         else
12638                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12639
12640                         config->hdr.length = i;
12641                         config->hdr.offset = offset;
12642                         config->hdr.client_id = bp->fp->cl_id;
12643                         config->hdr.reserved1 = 0;
12644
12645                         bp->set_mac_pending++;
12646                         smp_wmb();
12647
12648                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12649                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12650                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12651                                       0);
12652                 } else { /* E1H */
12653                         /* Accept one or more multicasts */
12654                         struct netdev_hw_addr *ha;
12655                         u32 mc_filter[MC_HASH_SIZE];
12656                         u32 crc, bit, regidx;
12657                         int i;
12658
12659                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12660
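                        /* Hash sketch (hypothetical address): a MAC whose
                         * crc32c_le() is 0xa1xxxxxx gives bit = 0xa1 = 161,
                         * so regidx = 161 >> 5 = 5 and bit = 161 & 0x1f = 1,
                         * i.e. mc_filter[5] |= (1 << 1).
                         */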
12661                         netdev_for_each_mc_addr(ha, dev) {
12662                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12663                                    ha->addr);
12664
12665                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12666                                 bit = (crc >> 24) & 0xff;
12667                                 regidx = bit >> 5;
12668                                 bit &= 0x1f;
12669                                 mc_filter[regidx] |= (1 << bit);
12670                         }
12671
12672                         for (i = 0; i < MC_HASH_SIZE; i++)
12673                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12674                                        mc_filter[i]);
12675                 }
12676         }
12677
12678         bp->rx_mode = rx_mode;
12679         bnx2x_set_storm_rx_mode(bp);
12680 }
12681
12682 /* called with rtnl_lock */
12683 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12684 {
12685         struct sockaddr *addr = p;
12686         struct bnx2x *bp = netdev_priv(dev);
12687
12688         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12689                 return -EINVAL;
12690
12691         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12692         if (netif_running(dev)) {
12693                 if (CHIP_IS_E1(bp))
12694                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12695                 else
12696                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12697         }
12698
12699         return 0;
12700 }
12701
12702 /* called with rtnl_lock */
12703 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12704                            int devad, u16 addr)
12705 {
12706         struct bnx2x *bp = netdev_priv(netdev);
12707         u16 value;
12708         int rc;
12709         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12710
12711         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12712            prtad, devad, addr);
12713
12714         if (prtad != bp->mdio.prtad) {
12715                 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12716                    prtad, bp->mdio.prtad);
12717                 return -EINVAL;
12718         }
12719
12720         /* The HW expects different devad if CL22 is used */
12721         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12722
12723         bnx2x_acquire_phy_lock(bp);
12724         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12725                              devad, addr, &value);
12726         bnx2x_release_phy_lock(bp);
12727         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12728
12729         if (!rc)
12730                 rc = value;
12731         return rc;
12732 }
12733
12734 /* called with rtnl_lock */
12735 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12736                             u16 addr, u16 value)
12737 {
12738         struct bnx2x *bp = netdev_priv(netdev);
12739         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12740         int rc;
12741
12742         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12743                            " value 0x%x\n", prtad, devad, addr, value);
12744
12745         if (prtad != bp->mdio.prtad) {
12746                 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
12747                    prtad, bp->mdio.prtad);
12748                 return -EINVAL;
12749         }
12750
12751         /* The HW expects different devad if CL22 is used */
12752         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12753
12754         bnx2x_acquire_phy_lock(bp);
12755         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12756                               devad, addr, value);
12757         bnx2x_release_phy_lock(bp);
12758         return rc;
12759 }
12760
12761 /* called with rtnl_lock */
12762 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12763 {
12764         struct bnx2x *bp = netdev_priv(dev);
12765         struct mii_ioctl_data *mdio = if_mii(ifr);
12766
12767         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12768            mdio->phy_id, mdio->reg_num, mdio->val_in);
12769
12770         if (!netif_running(dev))
12771                 return -EAGAIN;
12772
12773         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12774 }
12775
12776 /* called with rtnl_lock */
12777 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12778 {
12779         struct bnx2x *bp = netdev_priv(dev);
12780         int rc = 0;
12781
12782         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12783                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12784                 return -EAGAIN;
12785         }
12786
12787         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12788             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12789                 return -EINVAL;
12790
12791         /* This does not race with packet allocation
12792          * because the actual alloc size is
12793          * only updated as part of load
12794          */
12795         dev->mtu = new_mtu;
12796
12797         if (netif_running(dev)) {
12798                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12799                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12800         }
12801
12802         return rc;
12803 }
12804
12805 static void bnx2x_tx_timeout(struct net_device *dev)
12806 {
12807         struct bnx2x *bp = netdev_priv(dev);
12808
12809 #ifdef BNX2X_STOP_ON_ERROR
12810         if (!bp->panic)
12811                 bnx2x_panic();
12812 #endif
12813         /* This allows the netif to be shut down gracefully before resetting */
12814         schedule_delayed_work(&bp->reset_task, 0);
12815 }
12816
12817 #ifdef BCM_VLAN
12818 /* called with rtnl_lock */
12819 static void bnx2x_vlan_rx_register(struct net_device *dev,
12820                                    struct vlan_group *vlgrp)
12821 {
12822         struct bnx2x *bp = netdev_priv(dev);
12823
12824         bp->vlgrp = vlgrp;
12825
12826         /* Set flags according to the required capabilities */
12827         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12828
12829         if (dev->features & NETIF_F_HW_VLAN_TX)
12830                 bp->flags |= HW_VLAN_TX_FLAG;
12831
12832         if (dev->features & NETIF_F_HW_VLAN_RX)
12833                 bp->flags |= HW_VLAN_RX_FLAG;
12834
12835         if (netif_running(dev))
12836                 bnx2x_set_client_config(bp);
12837 }
12838
12839 #endif
12840
12841 #ifdef CONFIG_NET_POLL_CONTROLLER
12842 static void poll_bnx2x(struct net_device *dev)
12843 {
12844         struct bnx2x *bp = netdev_priv(dev);
12845
12846         disable_irq(bp->pdev->irq);
12847         bnx2x_interrupt(bp->pdev->irq, dev);
12848         enable_irq(bp->pdev->irq);
12849 }
12850 #endif
12851
12852 static const struct net_device_ops bnx2x_netdev_ops = {
12853         .ndo_open               = bnx2x_open,
12854         .ndo_stop               = bnx2x_close,
12855         .ndo_start_xmit         = bnx2x_start_xmit,
12856         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12857         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12858         .ndo_validate_addr      = eth_validate_addr,
12859         .ndo_do_ioctl           = bnx2x_ioctl,
12860         .ndo_change_mtu         = bnx2x_change_mtu,
12861         .ndo_tx_timeout         = bnx2x_tx_timeout,
12862 #ifdef BCM_VLAN
12863         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12864 #endif
12865 #ifdef CONFIG_NET_POLL_CONTROLLER
12866         .ndo_poll_controller    = poll_bnx2x,
12867 #endif
12868 };
12869
12870 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12871                                     struct net_device *dev)
12872 {
12873         struct bnx2x *bp;
12874         int rc;
12875
12876         SET_NETDEV_DEV(dev, &pdev->dev);
12877         bp = netdev_priv(dev);
12878
12879         bp->dev = dev;
12880         bp->pdev = pdev;
12881         bp->flags = 0;
12882         bp->func = PCI_FUNC(pdev->devfn);
12883
12884         rc = pci_enable_device(pdev);
12885         if (rc) {
12886                 dev_err(&bp->pdev->dev,
12887                         "Cannot enable PCI device, aborting\n");
12888                 goto err_out;
12889         }
12890
12891         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12892                 dev_err(&bp->pdev->dev,
12893                         "Cannot find PCI device base address, aborting\n");
12894                 rc = -ENODEV;
12895                 goto err_out_disable;
12896         }
12897
12898         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12899                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12900                        " base address, aborting\n");
12901                 rc = -ENODEV;
12902                 goto err_out_disable;
12903         }
12904
12905         if (atomic_read(&pdev->enable_cnt) == 1) {
12906                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12907                 if (rc) {
12908                         dev_err(&bp->pdev->dev,
12909                                 "Cannot obtain PCI resources, aborting\n");
12910                         goto err_out_disable;
12911                 }
12912
12913                 pci_set_master(pdev);
12914                 pci_save_state(pdev);
12915         }
12916
12917         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12918         if (bp->pm_cap == 0) {
12919                 dev_err(&bp->pdev->dev,
12920                         "Cannot find power management capability, aborting\n");
12921                 rc = -EIO;
12922                 goto err_out_release;
12923         }
12924
12925         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12926         if (bp->pcie_cap == 0) {
12927                 dev_err(&bp->pdev->dev,
12928                         "Cannot find PCI Express capability, aborting\n");
12929                 rc = -EIO;
12930                 goto err_out_release;
12931         }
12932
12933         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12934                 bp->flags |= USING_DAC_FLAG;
12935                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12936                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12937                                " failed, aborting\n");
12938                         rc = -EIO;
12939                         goto err_out_release;
12940                 }
12941
12942         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12943                 dev_err(&bp->pdev->dev,
12944                         "System does not support DMA, aborting\n");
12945                 rc = -EIO;
12946                 goto err_out_release;
12947         }
12948
12949         dev->mem_start = pci_resource_start(pdev, 0);
12950         dev->base_addr = dev->mem_start;
12951         dev->mem_end = pci_resource_end(pdev, 0);
12952
12953         dev->irq = pdev->irq;
12954
12955         bp->regview = pci_ioremap_bar(pdev, 0);
12956         if (!bp->regview) {
12957                 dev_err(&bp->pdev->dev,
12958                         "Cannot map register space, aborting\n");
12959                 rc = -ENOMEM;
12960                 goto err_out_release;
12961         }
12962
12963         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12964                                         min_t(u64, BNX2X_DB_SIZE,
12965                                               pci_resource_len(pdev, 2)));
12966         if (!bp->doorbells) {
12967                 dev_err(&bp->pdev->dev,
12968                         "Cannot map doorbell space, aborting\n");
12969                 rc = -ENOMEM;
12970                 goto err_out_unmap;
12971         }
12972
12973         bnx2x_set_power_state(bp, PCI_D0);
12974
12975         /* clean indirect addresses */
12976         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12977                                PCICFG_VENDOR_ID_OFFSET);
12978         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12979         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12980         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12981         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12982
12983         /* Reset the load counter */
12984         bnx2x_clear_load_cnt(bp);
12985
12986         dev->watchdog_timeo = TX_TIMEOUT;
12987
12988         dev->netdev_ops = &bnx2x_netdev_ops;
12989         dev->ethtool_ops = &bnx2x_ethtool_ops;
12990         dev->features |= NETIF_F_SG;
12991         dev->features |= NETIF_F_HW_CSUM;
12992         if (bp->flags & USING_DAC_FLAG)
12993                 dev->features |= NETIF_F_HIGHDMA;
12994         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12995         dev->features |= NETIF_F_TSO6;
12996 #ifdef BCM_VLAN
12997         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
12998         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12999
13000         dev->vlan_features |= NETIF_F_SG;
13001         dev->vlan_features |= NETIF_F_HW_CSUM;
13002         if (bp->flags & USING_DAC_FLAG)
13003                 dev->vlan_features |= NETIF_F_HIGHDMA;
13004         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13005         dev->vlan_features |= NETIF_F_TSO6;
13006 #endif
13007
13008         /* get_port_hwinfo() will set prtad and mmds properly */
13009         bp->mdio.prtad = MDIO_PRTAD_NONE;
13010         bp->mdio.mmds = 0;
13011         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13012         bp->mdio.dev = dev;
13013         bp->mdio.mdio_read = bnx2x_mdio_read;
13014         bp->mdio.mdio_write = bnx2x_mdio_write;
13015
13016         return 0;
13017
13018 err_out_unmap:
13019         if (bp->regview) {
13020                 iounmap(bp->regview);
13021                 bp->regview = NULL;
13022         }
13023         if (bp->doorbells) {
13024                 iounmap(bp->doorbells);
13025                 bp->doorbells = NULL;
13026         }
13027
13028 err_out_release:
13029         if (atomic_read(&pdev->enable_cnt) == 1)
13030                 pci_release_regions(pdev);
13031
13032 err_out_disable:
13033         pci_disable_device(pdev);
13034         pci_set_drvdata(pdev, NULL);
13035
13036 err_out:
13037         return rc;
13038 }
13039
13040 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13041                                                  int *width, int *speed)
13042 {
13043         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13044
13045         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13046
13047         /* returned speed: 1 = 2.5GHz, 2 = 5GHz */
13048         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13049 }
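
/* Example decode (hypothetical register value): a link-control word whose
 * width field is 4 and whose speed field is 2 reports a PCI-E x4 5GHz
 * (Gen2) link, matching the probe message printed in bnx2x_init_one().
 */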
13050
13051 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13052 {
13053         const struct firmware *firmware = bp->firmware;
13054         struct bnx2x_fw_file_hdr *fw_hdr;
13055         struct bnx2x_fw_file_section *sections;
13056         u32 offset, len, num_ops;
13057         u16 *ops_offsets;
13058         int i;
13059         const u8 *fw_ver;
13060
13061         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13062                 return -EINVAL;
13063
13064         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13065         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13066
13067         /* Make sure none of the offsets and sizes make us read beyond
13068          * the end of the firmware data */
13069         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13070                 offset = be32_to_cpu(sections[i].offset);
13071                 len = be32_to_cpu(sections[i].len);
13072                 if (offset + len > firmware->size) {
13073                         dev_err(&bp->pdev->dev,
13074                                 "Section %d length is out of bounds\n", i);
13075                         return -EINVAL;
13076                 }
13077         }
13078
13079         /* Likewise for the init_ops offsets */
13080         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13081         ops_offsets = (u16 *)(firmware->data + offset);
13082         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13083
13084         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13085                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13086                         dev_err(&bp->pdev->dev,
13087                                 "Section offset %d is out of bounds\n", i);
13088                         return -EINVAL;
13089                 }
13090         }
13091
13092         /* Check FW version */
13093         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13094         fw_ver = firmware->data + offset;
13095         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13096             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13097             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13098             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13099                 dev_err(&bp->pdev->dev,
13100                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13101                        fw_ver[0], fw_ver[1], fw_ver[2],
13102                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13103                        BCM_5710_FW_MINOR_VERSION,
13104                        BCM_5710_FW_REVISION_VERSION,
13105                        BCM_5710_FW_ENGINEERING_VERSION);
13106                 return -EINVAL;
13107         }
13108
13109         return 0;
13110 }
13111
13112 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13113 {
13114         const __be32 *source = (const __be32 *)_source;
13115         u32 *target = (u32 *)_target;
13116         u32 i;
13117
13118         for (i = 0; i < n/4; i++)
13119                 target[i] = be32_to_cpu(source[i]);
13120 }
13121
13122 /*
13123    Ops array is stored in the following format:
13124    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13125  */
13126 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13127 {
13128         const __be32 *source = (const __be32 *)_source;
13129         struct raw_op *target = (struct raw_op *)_target;
13130         u32 i, j, tmp;
13131
13132         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13133                 tmp = be32_to_cpu(source[j]);
13134                 target[i].op = (tmp >> 24) & 0xff;
13135                 target[i].offset = tmp & 0xffffff;
13136                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13137         }
13138 }
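
/* Decoding sketch for the layout above (hypothetical bytes): the
 * big-endian words 0x02001040 and 0x00000001 become op = 0x02,
 * offset = 0x001040 and raw_data = 0x00000001.
 */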
13139
13140 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13141 {
13142         const __be16 *source = (const __be16 *)_source;
13143         u16 *target = (u16 *)_target;
13144         u32 i;
13145
13146         for (i = 0; i < n/2; i++)
13147                 target[i] = be16_to_cpu(source[i]);
13148 }
13149
13150 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13151 do {                                                                    \
13152         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13153         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13154         if (!bp->arr) {                                                 \
13155                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13156                 goto lbl;                                               \
13157         }                                                               \
13158         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13159              (u8 *)bp->arr, len);                                       \
13160 } while (0)
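
/* For instance, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) below allocates bp->init_data, byte-swaps the matching
 * firmware section into it and jumps to request_firmware_exit on
 * allocation failure.
 */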
13161
13162 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13163 {
13164         const char *fw_file_name;
13165         struct bnx2x_fw_file_hdr *fw_hdr;
13166         int rc;
13167
13168         if (CHIP_IS_E1(bp))
13169                 fw_file_name = FW_FILE_NAME_E1;
13170         else if (CHIP_IS_E1H(bp))
13171                 fw_file_name = FW_FILE_NAME_E1H;
13172         else {
13173                 dev_err(dev, "Unsupported chip revision\n");
13174                 return -EINVAL;
13175         }
13176
13177         dev_info(dev, "Loading %s\n", fw_file_name);
13178
13179         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13180         if (rc) {
13181                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13182                 goto request_firmware_exit;
13183         }
13184
13185         rc = bnx2x_check_firmware(bp);
13186         if (rc) {
13187                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13188                 goto request_firmware_exit;
13189         }
13190
13191         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13192
13193         /* Initialize the pointers to the init arrays */
13194         /* Blob */
13195         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13196
13197         /* Opcodes */
13198         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13199
13200         /* Offsets */
13201         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13202                             be16_to_cpu_n);
13203
13204         /* STORMs firmware */
13205         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13206                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13207         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13208                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13209         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13210                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13211         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13212                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13213         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13214                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13215         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13216                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13217         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13218                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13219         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13220                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13221
13222         return 0;
13223
13224 init_offsets_alloc_err:
13225         kfree(bp->init_ops);
13226 init_ops_alloc_err:
13227         kfree(bp->init_data);
13228 request_firmware_exit:
13229         release_firmware(bp->firmware);
13230
13231         return rc;
13232 }
13233
13234
13235 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13236                                     const struct pci_device_id *ent)
13237 {
13238         struct net_device *dev = NULL;
13239         struct bnx2x *bp;
13240         int pcie_width, pcie_speed;
13241         int rc;
13242
13243         /* dev is zeroed in alloc_etherdev_mq() */
13244         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13245         if (!dev) {
13246                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13247                 return -ENOMEM;
13248         }
13249
13250         bp = netdev_priv(dev);
13251         bp->msg_enable = debug;
13252
13253         pci_set_drvdata(pdev, dev);
13254
13255         rc = bnx2x_init_dev(pdev, dev);
13256         if (rc < 0) {
13257                 free_netdev(dev);
13258                 return rc;
13259         }
13260
13261         rc = bnx2x_init_bp(bp);
13262         if (rc)
13263                 goto init_one_exit;
13264
13265         /* Set init arrays */
13266         rc = bnx2x_init_firmware(bp, &pdev->dev);
13267         if (rc) {
13268                 dev_err(&pdev->dev, "Error loading firmware\n");
13269                 goto init_one_exit;
13270         }
13271
13272         rc = register_netdev(dev);
13273         if (rc) {
13274                 dev_err(&pdev->dev, "Cannot register net device\n");
13275                 goto init_one_exit;
13276         }
13277
13278         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13279         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13280                " IRQ %d, ", board_info[ent->driver_data].name,
13281                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13282                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13283                dev->base_addr, bp->pdev->irq);
13284         pr_cont("node addr %pM\n", dev->dev_addr);
13285
13286         return 0;
13287
13288 init_one_exit:
13289         if (bp->regview)
13290                 iounmap(bp->regview);
13291
13292         if (bp->doorbells)
13293                 iounmap(bp->doorbells);
13294
13295         free_netdev(dev);
13296
13297         if (atomic_read(&pdev->enable_cnt) == 1)
13298                 pci_release_regions(pdev);
13299
13300         pci_disable_device(pdev);
13301         pci_set_drvdata(pdev, NULL);
13302
13303         return rc;
13304 }
13305
13306 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13307 {
13308         struct net_device *dev = pci_get_drvdata(pdev);
13309         struct bnx2x *bp;
13310
13311         if (!dev) {
13312                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13313                 return;
13314         }
13315         bp = netdev_priv(dev);
13316
13317         unregister_netdev(dev);
13318
13319         /* Make sure RESET task is not scheduled before continuing */
13320         cancel_delayed_work_sync(&bp->reset_task);
13321
13322         kfree(bp->init_ops_offsets);
13323         kfree(bp->init_ops);
13324         kfree(bp->init_data);
13325         release_firmware(bp->firmware);
13326
13327         if (bp->regview)
13328                 iounmap(bp->regview);
13329
13330         if (bp->doorbells)
13331                 iounmap(bp->doorbells);
13332
13333         free_netdev(dev);
13334
13335         if (atomic_read(&pdev->enable_cnt) == 1)
13336                 pci_release_regions(pdev);
13337
13338         pci_disable_device(pdev);
13339         pci_set_drvdata(pdev, NULL);
13340 }
13341
13342 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13343 {
13344         struct net_device *dev = pci_get_drvdata(pdev);
13345         struct bnx2x *bp;
13346
13347         if (!dev) {
13348                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13349                 return -ENODEV;
13350         }
13351         bp = netdev_priv(dev);
13352
13353         rtnl_lock();
13354
13355         pci_save_state(pdev);
13356
13357         if (!netif_running(dev)) {
13358                 rtnl_unlock();
13359                 return 0;
13360         }
13361
13362         netif_device_detach(dev);
13363
13364         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13365
13366         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13367
13368         rtnl_unlock();
13369
13370         return 0;
13371 }
13372
13373 static int bnx2x_resume(struct pci_dev *pdev)
13374 {
13375         struct net_device *dev = pci_get_drvdata(pdev);
13376         struct bnx2x *bp;
13377         int rc;
13378
13379         if (!dev) {
13380                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13381                 return -ENODEV;
13382         }
13383         bp = netdev_priv(dev);
13384
13385         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13386                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13387                 return -EAGAIN;
13388         }
13389
13390         rtnl_lock();
13391
13392         pci_restore_state(pdev);
13393
13394         if (!netif_running(dev)) {
13395                 rtnl_unlock();
13396                 return 0;
13397         }
13398
13399         bnx2x_set_power_state(bp, PCI_D0);
13400         netif_device_attach(dev);
13401
13402         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13403
13404         rtnl_unlock();
13405
13406         return rc;
13407 }
13408
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

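/*
 * Re-acquire the shared memory (MCP) state after a slot reset: re-read
 * the shmem base address, verify that it falls within the expected
 * window, check the MCP validity signature and re-sync the driver's
 * firmware mailbox sequence number.
 */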
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                printk(KERN_ERR "Handling parity error recovery. Try again later\n");
                return;
        }

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

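/* PCI error recovery (EEH/AER) entry points */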
static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

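/*
 * Module entry point: create the single-threaded slowpath workqueue
 * and register the PCI driver. The workqueue is destroyed again if
 * driver registration fails.
 */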
static int __init bnx2x_init(void)
{
        int ret;

        pr_info("%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                pr_err("Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                pr_err("Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

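/*
 * Module exit point: unregister the PCI driver and destroy the
 * slowpath workqueue.
 */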
static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/*
 * bnx2x_cnic_sp_post - post queued CNIC elements to the slowpath queue
 *
 * @count denotes the number of new slowpath completions we have seen,
 * i.e. how many SPQ slots have been freed up for further CNIC elements.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}

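/*
 * Entry point for the cnic driver to submit up to @count 16-byte kwqes.
 * The elements are copied into the driver's circular cnic_kwq buffer
 * and posted to the slowpath queue as soon as room is available there.
 * Returns the number of elements actually queued.
 */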
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

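/*
 * Deliver a control event to the registered cnic driver, if any.
 * Process-context variant: serialized by cnic_mutex.
 */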
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

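/*
 * Same as bnx2x_cnic_ctl_send() but safe to call from BH context:
 * the cnic_ops pointer is sampled under rcu_read_lock() instead of
 * the mutex.
 */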
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

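/* Report a CFC delete completion for @cid to the cnic driver */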
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

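/*
 * Dispatcher for control commands issued by the cnic driver:
 * context-table (ILT) writes, slowpath completion accounting and
 * starting/stopping the rx-mode filtering for an L2 client.
 */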
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

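/*
 * Publish to cnic which interrupt (MSI-X vector 1 when MSI-X is used)
 * and which status blocks it should work with.
 */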
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

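/*
 * Called by the cnic driver to attach to this device: allocate the
 * kwqe ring (one page), initialize the CNIC status block, set the
 * iSCSI MAC address and publish the ops pointer for RCU readers.
 */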
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

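/*
 * Detach the cnic driver: clear the iSCSI MAC if it was set, drop the
 * ops pointer and wait for all RCU readers to finish before freeing
 * the kwqe ring.
 */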
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

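/*
 * Called by the cnic module to discover this device: returns the
 * cnic_eth_dev descriptor with the resources and callbacks the cnic
 * driver needs.
 */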
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */
