/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-6"
#define DRV_MODULE_RELDATE      "2010/02/16"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

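/*
 * Note (illustrative): FW_FILE_VERSION expands through __stringify() into
 * a dotted version string, so FW_FILE_NAME_E1 becomes a name of the form
 * "bnx2x-e1-5.2.13.0.fw" (numbers here are only an example; the actual
 * values come from the BCM_5710_FW_*_VERSION macros).
 */
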
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

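/*
 * Example (illustrative): these parameters are set at module load time,
 * e.g.
 *
 *	modprobe bnx2x int_mode=2 num_queues=4 disable_tpa=1
 *
 * which forces MSI, four queues and no TPA aggregation.
 */
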
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

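/*
 * Usage sketch (illustrative, not a call site from this file): the pair
 * above tunnels single GRC dword accesses through the PCI config window
 * while DMAE is unavailable, e.g.
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, addr);
 *	bnx2x_reg_wr_ind(bp, addr, val | some_bit);	(some_bit hypothetical)
 *
 * The trailing write of PCICFG_VENDOR_ID_OFFSET restores the window so
 * that a stray config read cannot alias into GRC space.
 */
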
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

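/*
 * Usage sketch (illustrative): callers hand bnx2x_write_dmae() a
 * DMA-coherent source buffer and a GRC byte address; len32 is counted
 * in dwords, so copying two dwords of slowpath write-back data would
 * look like
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, 2);
 *
 * Completion is detected by polling the wb_comp dword that the DMAE
 * engine writes back to host memory.
 */
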
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

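/*
 * Worked example (illustrative): len is in dwords while offset advances
 * in bytes, hence the "* 4". With len = 2*DMAE_LEN32_WR_MAX + 5, the
 * loop above issues two maximum-sized writes, advancing offset by
 * DMAE_LEN32_WR_MAX*4 bytes each time, and the tail call flushes the
 * remaining 5 dwords.
 */
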
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

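/*
 * Layout note (derived from the reads above): each storm keeps an assert
 * list of four consecutive dwords per entry, so entry i spans
 * *_ASSERT_LIST_OFFSET(i) .. +12. The scan stops at the first entry whose
 * row0 still holds COMMON_ASM_INVALID_ASSERT_OPCODE, and rc counts how
 * many real asserts were found.
 */
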
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

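/*
 * Worked example (illustrative): the NUM_TX_RINGS "next-page" BDs can
 * never carry data, so they are counted as permanently used. With
 * prod = 110 and cons = 100, used = 10 + NUM_TX_RINGS and the function
 * reports tx_ring_size - used free BDs; SUB_S16() keeps the result
 * correct across 16-bit producer/consumer wrap-around.
 */
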
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

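/*
 * Sizing note (illustrative): each SGE slot maps one physically
 * contiguous block of SGE_PAGE_SIZE * PAGES_PER_SGE bytes, allocated as
 * an order-PAGES_PER_SGE_SHIFT page group; with PAGES_PER_SGE_SHIFT = 0
 * (an assumed configuration) that degenerates to a single page per slot.
 */
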
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

1351                 /* Unmap the page as we r going to pass it to the stack */
1352                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1353                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1354
1355                 /* Add one frag and update the appropriate fields in the skb */
1356                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1357
1358                 skb->data_len += frag_len;
1359                 skb->truesize += frag_len;
1360                 skb->len += frag_len;
1361
1362                 frag_size -= frag_len;
1363         }
1364
1365         return 0;
1366 }
1367
1368 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1370                            u16 cqe_idx)
1371 {
1372         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373         struct sk_buff *skb = rx_buf->skb;
1374         /* alloc new skb */
1375         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1376
1377         /* Unmap skb in the pool anyway, as we are going to change
1378            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1379            fails. */
1380         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1381                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1382
1383         if (likely(new_skb)) {
1384                 /* fix ip xsum and give it to the stack */
1385                 /* (no need to map the new skb) */
1386 #ifdef BCM_VLAN
1387                 int is_vlan_cqe =
1388                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389                          PARSING_FLAGS_VLAN);
1390                 int is_not_hwaccel_vlan_cqe =
1391                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1392 #endif
1393
1394                 prefetch(skb);
1395                 prefetch(((char *)(skb)) + 128);
1396
1397 #ifdef BNX2X_STOP_ON_ERROR
1398                 if (pad + len > bp->rx_buf_size) {
1399                         BNX2X_ERR("skb_put is about to fail...  "
1400                                   "pad %d  len %d  rx_buf_size %d\n",
1401                                   pad, len, bp->rx_buf_size);
1402                         bnx2x_panic();
1403                         return;
1404                 }
1405 #endif
1406
1407                 skb_reserve(skb, pad);
1408                 skb_put(skb, len);
1409
1410                 skb->protocol = eth_type_trans(skb, bp->dev);
1411                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1412
1413                 {
1414                         struct iphdr *iph;
1415
1416                         iph = (struct iphdr *)skb->data;
1417 #ifdef BCM_VLAN
1418                         /* If there is no Rx VLAN offloading -
1419                            take VLAN tag into an account */
1420                         if (unlikely(is_not_hwaccel_vlan_cqe))
1421                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1422 #endif
1423                         iph->check = 0;
1424                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1425                 }
1426
1427                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428                                          &cqe->fast_path_cqe, cqe_idx)) {
1429 #ifdef BCM_VLAN
1430                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431                             (!is_not_hwaccel_vlan_cqe))
1432                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433                                                 le16_to_cpu(cqe->fast_path_cqe.
1434                                                             vlan_tag));
1435                         else
1436 #endif
1437                                 netif_receive_skb(skb);
1438                 } else {
1439                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440                            " - dropping packet!\n");
1441                         dev_kfree_skb(skb);
1442                 }
1443
1444
1445                 /* put new skb in bin */
1446                 fp->tpa_pool[queue].skb = new_skb;
1447
1448         } else {
1449                 /* else drop the packet and keep the buffer in the bin */
1450                 DP(NETIF_MSG_RX_STATUS,
1451                    "Failed to allocate new skb - dropping packet!\n");
1452                 fp->eth_q_stats.rx_skb_alloc_failed++;
1453         }
1454
1455         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1456 }
1457
1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459                                         struct bnx2x_fastpath *fp,
1460                                         u16 bd_prod, u16 rx_comp_prod,
1461                                         u16 rx_sge_prod)
1462 {
1463         struct ustorm_eth_rx_producers rx_prods = {0};
1464         int i;
1465
1466         /* Update producers */
1467         rx_prods.bd_prod = bd_prod;
1468         rx_prods.cqe_prod = rx_comp_prod;
1469         rx_prods.sge_prod = rx_sge_prod;
1470
1471         /*
1472          * Make sure that the BD and SGE data is updated before updating the
1473          * producers since FW might read the BD/SGE right after the producer
1474          * is updated.
1475          * This is only applicable for weak-ordered memory model archs such
1476          * as IA-64. The following barrier is also mandatory since the FW
1477          * assumes that BDs always have buffers.
1478          */
1479         wmb();
1480
1481         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482                 REG_WR(bp, BAR_USTRORM_INTMEM +
1483                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484                        ((u32 *)&rx_prods)[i]);
1485
1486         mmiowb(); /* keep prod updates ordered */
1487
1488         DP(NETIF_MSG_RX_STATUS,
1489            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1490            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1491 }
1492
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494 {
1495         struct bnx2x *bp = fp->bp;
1496         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498         int rx_pkt = 0;
1499
1500 #ifdef BNX2X_STOP_ON_ERROR
1501         if (unlikely(bp->panic))
1502                 return 0;
1503 #endif
1504
1505         /* The CQ "next element" is the same size as a regular element,
1506            which is why it is OK to skip over it here */
1507         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509                 hw_comp_cons++;
1510
1511         bd_cons = fp->rx_bd_cons;
1512         bd_prod = fp->rx_bd_prod;
1513         bd_prod_fw = bd_prod;
1514         sw_comp_cons = fp->rx_comp_cons;
1515         sw_comp_prod = fp->rx_comp_prod;
1516
1517         /* Memory barrier necessary as speculative reads of the rx
1518          * buffer can be ahead of the index in the status block
1519          */
1520         rmb();
1521
1522         DP(NETIF_MSG_RX_STATUS,
1523            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1524            fp->index, hw_comp_cons, sw_comp_cons);
1525
1526         while (sw_comp_cons != hw_comp_cons) {
1527                 struct sw_rx_bd *rx_buf = NULL;
1528                 struct sk_buff *skb;
1529                 union eth_rx_cqe *cqe;
1530                 u8 cqe_fp_flags;
1531                 u16 len, pad;
1532
1533                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534                 bd_prod = RX_BD(bd_prod);
1535                 bd_cons = RX_BD(bd_cons);
1536
1537                 /* Prefetch the page containing the BD descriptor
1538                    at the producer's index. It will be needed when a new
1539                    skb is allocated */
1540                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541                                              (&fp->rx_desc_ring[bd_prod])) -
1542                                   PAGE_SIZE + 1));
1543
1544                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1546
1547                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1548                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1549                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1553
1554                 /* is this a slowpath msg? */
1555                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556                         bnx2x_sp_event(fp, cqe);
1557                         goto next_cqe;
1558
1559                 /* this is an rx packet */
1560                 } else {
1561                         rx_buf = &fp->rx_buf_ring[bd_cons];
1562                         skb = rx_buf->skb;
1563                         prefetch(skb);
1564                         prefetch((u8 *)skb + 256);
1565                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566                         pad = cqe->fast_path_cqe.placement_offset;
1567
1568                         /* If the CQE is marked as both TPA_START and
1569                            TPA_END, it is a non-TPA CQE */
1570                         if ((!fp->disable_tpa) &&
1571                             (TPA_TYPE(cqe_fp_flags) !=
1572                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1573                                 u16 queue = cqe->fast_path_cqe.queue_index;
1574
1575                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576                                         DP(NETIF_MSG_RX_STATUS,
1577                                            "calling tpa_start on queue %d\n",
1578                                            queue);
1579
1580                                         bnx2x_tpa_start(fp, queue, skb,
1581                                                         bd_cons, bd_prod);
1582                                         goto next_rx;
1583                                 }
1584
1585                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586                                         DP(NETIF_MSG_RX_STATUS,
1587                                            "calling tpa_stop on queue %d\n",
1588                                            queue);
1589
1590                                         if (!BNX2X_RX_SUM_FIX(cqe))
1591                                                 BNX2X_ERR("STOP on non-TCP "
1592                                                           "data\n");
1593
1594                                         /* This is the size of the linear
1595                                            data on this skb */
1596                                         len = le16_to_cpu(cqe->fast_path_cqe.
1597                                                                 len_on_bd);
1598                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1599                                                     len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601                                         if (bp->panic)
1602                                                 return 0;
1603 #endif
1604
1605                                         bnx2x_update_sge_prod(fp,
1606                                                         &cqe->fast_path_cqe);
1607                                         goto next_cqe;
1608                                 }
1609                         }
1610
1611                         pci_dma_sync_single_for_device(bp->pdev,
1612                                         pci_unmap_addr(rx_buf, mapping),
1613                                                        pad + RX_COPY_THRESH,
1614                                                        PCI_DMA_FROMDEVICE);
1615                         prefetch(skb);
1616                         prefetch(((char *)(skb)) + 128);
1617
1618                         /* is this an error packet? */
1619                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620                                 DP(NETIF_MSG_RX_ERR,
1621                                    "ERROR  flags %x  rx packet %u\n",
1622                                    cqe_fp_flags, sw_comp_cons);
1623                                 fp->eth_q_stats.rx_err_discard_pkt++;
1624                                 goto reuse_rx;
1625                         }
1626
1627                         /* Since we don't have a jumbo ring,
1628                          * copy small packets if mtu > 1500
1629                          */
1630                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631                             (len <= RX_COPY_THRESH)) {
1632                                 struct sk_buff *new_skb;
1633
1634                                 new_skb = netdev_alloc_skb(bp->dev,
1635                                                            len + pad);
1636                                 if (new_skb == NULL) {
1637                                         DP(NETIF_MSG_RX_ERR,
1638                                            "ERROR  packet dropped "
1639                                            "because of alloc failure\n");
1640                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1641                                         goto reuse_rx;
1642                                 }
1643
1644                                 /* aligned copy */
1645                                 skb_copy_from_linear_data_offset(skb, pad,
1646                                                     new_skb->data + pad, len);
1647                                 skb_reserve(new_skb, pad);
1648                                 skb_put(new_skb, len);
1649
1650                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652                                 skb = new_skb;
1653
1654                         } else
1655                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656                                 pci_unmap_single(bp->pdev,
1657                                         pci_unmap_addr(rx_buf, mapping),
1658                                                  bp->rx_buf_size,
1659                                                  PCI_DMA_FROMDEVICE);
1660                                 skb_reserve(skb, pad);
1661                                 skb_put(skb, len);
1662
1663                         } else {
1664                                 DP(NETIF_MSG_RX_ERR,
1665                                    "ERROR  packet dropped because "
1666                                    "of alloc failure\n");
1667                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670                                 goto next_rx;
1671                         }
1672
1673                         skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675                         skb->ip_summed = CHECKSUM_NONE;
1676                         if (bp->rx_csum) {
1677                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1679                                 else
1680                                         fp->eth_q_stats.hw_csum_err++;
1681                         }
1682                 }
1683
1684                 skb_record_rx_queue(skb, fp->index);
1685
1686 #ifdef BCM_VLAN
1687                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689                      PARSING_FLAGS_VLAN))
1690                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692                 else
1693 #endif
1694                         netif_receive_skb(skb);
1695
1696
1697 next_rx:
1698                 rx_buf->skb = NULL;
1699
1700                 bd_cons = NEXT_RX_IDX(bd_cons);
1701                 bd_prod = NEXT_RX_IDX(bd_prod);
1702                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703                 rx_pkt++;
1704 next_cqe:
1705                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1707
1708                 if (rx_pkt == budget)
1709                         break;
1710         } /* while */
1711
1712         fp->rx_bd_cons = bd_cons;
1713         fp->rx_bd_prod = bd_prod_fw;
1714         fp->rx_comp_cons = sw_comp_cons;
1715         fp->rx_comp_prod = sw_comp_prod;
1716
1717         /* Update producers */
1718         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719                              fp->rx_sge_prod);
1720
1721         fp->rx_pkt += rx_pkt;
1722         fp->rx_calls++;
1723
1724         return rx_pkt;
1725 }
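
/*
 * Illustrative NAPI usage (a sketch, not part of the original source; the
 * actual poll routine is defined elsewhere in this driver). bnx2x_rx_int()
 * honors the budget contract - its return value never exceeds budget - so
 * a poll handler would drive it roughly as:
 *
 *	work_done = bnx2x_rx_int(fp, budget);
 *	if (work_done < budget)
 *		napi_complete(napi);	/- then re-enable the SB interrupt -/
 */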
1726
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728 {
1729         struct bnx2x_fastpath *fp = fp_cookie;
1730         struct bnx2x *bp = fp->bp;
1731
1732         /* Return here if interrupt is disabled */
1733         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735                 return IRQ_HANDLED;
1736         }
1737
1738         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739            fp->index, fp->sb_id);
1740         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1741
1742 #ifdef BNX2X_STOP_ON_ERROR
1743         if (unlikely(bp->panic))
1744                 return IRQ_HANDLED;
1745 #endif
1746
1747         /* Handle Rx and Tx according to MSI-X vector */
1748         prefetch(fp->rx_cons_sb);
1749         prefetch(fp->tx_cons_sb);
1750         prefetch(&fp->status_blk->u_status_block.status_block_index);
1751         prefetch(&fp->status_blk->c_status_block.status_block_index);
1752         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1753
1754         return IRQ_HANDLED;
1755 }
1756
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758 {
1759         struct bnx2x *bp = netdev_priv(dev_instance);
1760         u16 status = bnx2x_ack_int(bp);
1761         u16 mask;
1762         int i;
1763
1764         /* Return here if interrupt is shared and it's not for us */
1765         if (unlikely(status == 0)) {
1766                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767                 return IRQ_NONE;
1768         }
1769         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1770
1771         /* Return here if interrupt is disabled */
1772         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774                 return IRQ_HANDLED;
1775         }
1776
1777 #ifdef BNX2X_STOP_ON_ERROR
1778         if (unlikely(bp->panic))
1779                 return IRQ_HANDLED;
1780 #endif
1781
1782         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783                 struct bnx2x_fastpath *fp = &bp->fp[i];
1784
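                /* Each fastpath SB owns bit (sb_id + 1) of the status word,
                   i.e. mask = 0x2 << sb_id; bit 0 is the slowpath
                   indication handled further down */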
1785                 mask = 0x2 << fp->sb_id;
1786                 if (status & mask) {
1787                         /* Handle Rx and Tx according to SB id */
1788                         prefetch(fp->rx_cons_sb);
1789                         prefetch(&fp->status_blk->u_status_block.
1790                                                 status_block_index);
1791                         prefetch(fp->tx_cons_sb);
1792                         prefetch(&fp->status_blk->c_status_block.
1793                                                 status_block_index);
1794                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795                         status &= ~mask;
1796                 }
1797         }
1798
1799 #ifdef BCM_CNIC
1800         mask = 0x2 << CNIC_SB_ID(bp);
1801         if (status & (mask | 0x1)) {
1802                 struct cnic_ops *c_ops = NULL;
1803
1804                 rcu_read_lock();
1805                 c_ops = rcu_dereference(bp->cnic_ops);
1806                 if (c_ops)
1807                         c_ops->cnic_handler(bp->cnic_data, NULL);
1808                 rcu_read_unlock();
1809
1810                 status &= ~mask;
1811         }
1812 #endif
1813
1814         if (unlikely(status & 0x1)) {
1815                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1816
1817                 status &= ~0x1;
1818                 if (!status)
1819                         return IRQ_HANDLED;
1820         }
1821
1822         if (status)
1823                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824                    status);
1825
1826         return IRQ_HANDLED;
1827 }
1828
1829 /* end of fast path */
1830
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1832
1833 /* Link */
1834
1835 /*
1836  * General service functions
1837  */
1838
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1840 {
1841         u32 lock_status;
1842         u32 resource_bit = (1 << resource);
1843         int func = BP_FUNC(bp);
1844         u32 hw_lock_control_reg;
1845         int cnt;
1846
1847         /* Validating that the resource is within range */
1848         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849                 DP(NETIF_MSG_HW,
1850                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852                 return -EINVAL;
1853         }
1854
1855         if (func <= 5) {
1856                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857         } else {
1858                 hw_lock_control_reg =
1859                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860         }
1861
1862         /* Validating that the resource is not already taken */
1863         lock_status = REG_RD(bp, hw_lock_control_reg);
1864         if (lock_status & resource_bit) {
1865                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1866                    lock_status, resource_bit);
1867                 return -EEXIST;
1868         }
1869
1870         /* Try for 5 seconds, retrying every 5 ms */
1871         for (cnt = 0; cnt < 1000; cnt++) {
1872                 /* Try to acquire the lock */
1873                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874                 lock_status = REG_RD(bp, hw_lock_control_reg);
1875                 if (lock_status & resource_bit)
1876                         return 0;
1877
1878                 msleep(5);
1879         }
1880         DP(NETIF_MSG_HW, "Timeout\n");
1881         return -EAGAIN;
1882 }
1883
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1885 {
1886         u32 lock_status;
1887         u32 resource_bit = (1 << resource);
1888         int func = BP_FUNC(bp);
1889         u32 hw_lock_control_reg;
1890
1891         /* Validating that the resource is within range */
1892         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893                 DP(NETIF_MSG_HW,
1894                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896                 return -EINVAL;
1897         }
1898
1899         if (func <= 5) {
1900                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901         } else {
1902                 hw_lock_control_reg =
1903                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904         }
1905
1906         /* Validating that the resource is currently taken */
1907         lock_status = REG_RD(bp, hw_lock_control_reg);
1908         if (!(lock_status & resource_bit)) {
1909                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1910                    lock_status, resource_bit);
1911                 return -EFAULT;
1912         }
1913
1914         REG_WR(bp, hw_lock_control_reg, resource_bit);
1915         return 0;
1916 }
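
/*
 * Illustrative pairing (a sketch; see bnx2x_set_gpio() below for a real
 * caller). The lock is taken by writing the resource bit to the "set"
 * register (base + 4) and dropped by writing it back to the base register:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... access the resource shared between functions ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */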
1917
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1920 {
1921         mutex_lock(&bp->port.phy_mutex);
1922
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925 }
1926
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1928 {
1929         if (bp->port.need_hw_lock)
1930                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1931
1932         mutex_unlock(&bp->port.phy_mutex);
1933 }
1934
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944         int value;
1945
1946         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948                 return -EINVAL;
1949         }
1950
1951         /* read GPIO value */
1952         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954         /* get the requested pin value */
1955         if ((gpio_reg & gpio_mask) == gpio_mask)
1956                 value = 1;
1957         else
1958                 value = 0;
1959
1960         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1961
1962         return value;
1963 }
1964
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1966 {
1967         /* The GPIO should be swapped if swap register is set and active */
1968         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970         int gpio_shift = gpio_num +
1971                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972         u32 gpio_mask = (1 << gpio_shift);
1973         u32 gpio_reg;
1974
1975         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977                 return -EINVAL;
1978         }
1979
1980         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981         /* read GPIO and mask except the float bits */
1982         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984         switch (mode) {
1985         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987                    gpio_num, gpio_shift);
1988                 /* clear FLOAT and set CLR */
1989                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991                 break;
1992
1993         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995                    gpio_num, gpio_shift);
1996                 /* clear FLOAT and set SET */
1997                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999                 break;
2000
2001         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003                    gpio_num, gpio_shift);
2004                 /* set FLOAT */
2005                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006                 break;
2007
2008         default:
2009                 break;
2010         }
2011
2012         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015         return 0;
2016 }
2017
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019 {
2020         /* The GPIO should be swapped if swap register is set and active */
2021         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023         int gpio_shift = gpio_num +
2024                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025         u32 gpio_mask = (1 << gpio_shift);
2026         u32 gpio_reg;
2027
2028         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030                 return -EINVAL;
2031         }
2032
2033         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034         /* read GPIO int */
2035         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037         switch (mode) {
2038         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040                                    "output low\n", gpio_num, gpio_shift);
2041                 /* clear SET and set CLR */
2042                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044                 break;
2045
2046         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048                                    "output high\n", gpio_num, gpio_shift);
2049                 /* clear CLR and set SET */
2050                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052                 break;
2053
2054         default:
2055                 break;
2056         }
2057
2058         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061         return 0;
2062 }
2063
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065 {
2066         u32 spio_mask = (1 << spio_num);
2067         u32 spio_reg;
2068
2069         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070             (spio_num > MISC_REGISTERS_SPIO_7)) {
2071                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072                 return -EINVAL;
2073         }
2074
2075         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076         /* read SPIO and mask except the float bits */
2077         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079         switch (mode) {
2080         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082                 /* clear FLOAT and set CLR */
2083                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085                 break;
2086
2087         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089                 /* clear FLOAT and set SET */
2090                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092                 break;
2093
2094         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096                 /* set FLOAT */
2097                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098                 break;
2099
2100         default:
2101                 break;
2102         }
2103
2104         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2106
2107         return 0;
2108 }
2109
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2111 {
2112         switch (bp->link_vars.ieee_fc &
2113                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116                                           ADVERTISED_Pause);
2117                 break;
2118
2119         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121                                          ADVERTISED_Pause);
2122                 break;
2123
2124         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126                 break;
2127
2128         default:
2129                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130                                           ADVERTISED_Pause);
2131                 break;
2132         }
2133 }
2134
2135 static void bnx2x_link_report(struct bnx2x *bp)
2136 {
2137         if (bp->flags & MF_FUNC_DIS) {
2138                 netif_carrier_off(bp->dev);
2139                 netdev_err(bp->dev, "NIC Link is Down\n");
2140                 return;
2141         }
2142
2143         if (bp->link_vars.link_up) {
2144                 u16 line_speed;
2145
2146                 if (bp->state == BNX2X_STATE_OPEN)
2147                         netif_carrier_on(bp->dev);
2148                 netdev_info(bp->dev, "NIC Link is Up, ");
2149
2150                 line_speed = bp->link_vars.line_speed;
2151                 if (IS_E1HMF(bp)) {
2152                         u16 vn_max_rate;
2153
2154                         vn_max_rate =
2155                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157                         if (vn_max_rate < line_speed)
2158                                 line_speed = vn_max_rate;
2159                 }
2160                 pr_cont("%d Mbps ", line_speed);
2161
2162                 if (bp->link_vars.duplex == DUPLEX_FULL)
2163                         pr_cont("full duplex");
2164                 else
2165                         pr_cont("half duplex");
2166
2167                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169                                 pr_cont(", receive ");
2170                                 if (bp->link_vars.flow_ctrl &
2171                                     BNX2X_FLOW_CTRL_TX)
2172                                         pr_cont("& transmit ");
2173                         } else {
2174                                 pr_cont(", transmit ");
2175                         }
2176                         pr_cont("flow control ON");
2177                 }
2178                 pr_cont("\n");
2179
2180         } else { /* link_down */
2181                 netif_carrier_off(bp->dev);
2182                 netdev_err(bp->dev, "NIC Link is Down\n");
2183         }
2184 }
2185
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2187 {
2188         if (!BP_NOMCP(bp)) {
2189                 u8 rc;
2190
2191                 /* Initialize link parameters structure variables */
2192                 /* It is recommended to turn off RX FC for jumbo frames
2193                    for better performance */
2194                 if (bp->dev->mtu > 5000)
2195                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196                 else
2197                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2198
2199                 bnx2x_acquire_phy_lock(bp);
2200
2201                 if (load_mode == LOAD_DIAG)
2202                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
2204                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2205
2206                 bnx2x_release_phy_lock(bp);
2207
2208                 bnx2x_calc_fc_adv(bp);
2209
2210                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212                         bnx2x_link_report(bp);
2213                 }
2214
2215                 return rc;
2216         }
2217         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2218         return -EINVAL;
2219 }
2220
2221 static void bnx2x_link_set(struct bnx2x *bp)
2222 {
2223         if (!BP_NOMCP(bp)) {
2224                 bnx2x_acquire_phy_lock(bp);
2225                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226                 bnx2x_release_phy_lock(bp);
2227
2228                 bnx2x_calc_fc_adv(bp);
2229         } else
2230                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2231 }
2232
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238                 bnx2x_release_phy_lock(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2241 }
2242
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2244 {
2245         u8 rc;
2246
2247         bnx2x_acquire_phy_lock(bp);
2248         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249         bnx2x_release_phy_lock(bp);
2250
2251         return rc;
2252 }
2253
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2255 {
2256         u32 r_param = bp->link_vars.line_speed / 8;
2257         u32 fair_periodic_timeout_usec;
2258         u32 t_fair;
2259
2260         memset(&(bp->cmng.rs_vars), 0,
2261                sizeof(struct rate_shaping_vars_per_port));
2262         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2263
2264         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2266
2267         /* this is the threshold below which no timer arming will occur;
2268            the 1.25 coefficient makes the threshold a little bigger
2269            than the real time, to compensate for timer inaccuracy */
2270         bp->cmng.rs_vars.rs_threshold =
2271                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
2273         /* resolution of fairness timer */
2274         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2276         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2277
2278         /* this is the threshold below which we won't arm the timer anymore */
2279         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2280
2281         /* we multiply by 1e3/8 to get bytes/msec.
2282            We don't want the credits to exceed
2283            t_fair*FAIR_MEM (the algorithm resolution) */
2284         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285         /* since each tick is 4 usec */
2286         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2287 }
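
/*
 * Worked example (illustrative only, taking the comments above at face
 * value): at 10G, line_speed = 10000, so r_param = 10000/8 = 1250 and
 * t_fair = T_FAIR_COEF / 10000 = 1000 usec; the fairness upper bound is
 * then 1250 * 1000 * FAIR_MEM, and both the rate-shaping and fairness
 * timeouts are converted to SDM ticks by dividing by 4.
 */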
2288
2289 /* Calculates the sum of vn_min_rates.
2290    It's needed for further normalizing of the min_rates.
2291    Returns:
2292      sum of vn_min_rates.
2293        or
2294      0 - if all the min_rates are 0.
2295      In the later case fainess algorithm should be deactivated.
2296      In the latter case the fairness algorithm should be deactivated.
2297  */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299 {
2300         int all_zero = 1;
2301         int port = BP_PORT(bp);
2302         int vn;
2303
2304         bp->vn_weight_sum = 0;
2305         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306                 int func = 2*vn + port;
2307                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311                 /* Skip hidden vns */
2312                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313                         continue;
2314
2315                 /* If min rate is zero - set it to 1 */
2316                 if (!vn_min_rate)
2317                         vn_min_rate = DEF_MIN_RATE;
2318                 else
2319                         all_zero = 0;
2320
2321                 bp->vn_weight_sum += vn_min_rate;
2322         }
2323
2324         /* ... only if all min rates are zeros - disable fairness */
2325         if (all_zero) {
2326                 bp->cmng.flags.cmng_enables &=
2327                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329                    "  fairness will be disabled\n");
2330         } else
2331                 bp->cmng.flags.cmng_enables |=
2332                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2333 }
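
/*
 * Illustrative example of the normalization above: with per-VN min rates
 * {0, 2500, 7500, 0}, the zero entries are bumped to DEF_MIN_RATE before
 * summing, so vn_weight_sum = 2500 + 7500 + 2*DEF_MIN_RATE and fairness
 * stays enabled; only when every non-hidden VN has a zero min rate is
 * fairness disabled.
 */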
2334
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2336 {
2337         struct rate_shaping_vars_per_vn m_rs_vn;
2338         struct fairness_vars_per_vn m_fair_vn;
2339         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340         u16 vn_min_rate, vn_max_rate;
2341         int i;
2342
2343         /* If function is hidden - set min and max to zeroes */
2344         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345                 vn_min_rate = 0;
2346                 vn_max_rate = 0;
2347
2348         } else {
2349                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351                 /* If min rate is zero - set it to 1 */
2352                 if (!vn_min_rate)
2353                         vn_min_rate = DEF_MIN_RATE;
2354                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356         }
2357         DP(NETIF_MSG_IFUP,
2358            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2359            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2360
2361         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364         /* global vn counter - maximal Mbps for this vn */
2365         m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367         /* quota - number of bytes transmitted in this period */
2368         m_rs_vn.vn_counter.quota =
2369                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
2371         if (bp->vn_weight_sum) {
2372                 /* credit for each period of the fairness algorithm:
2373                    number of bytes in T_FAIR (the VNs share the port rate).
2374                    vn_weight_sum should not be larger than 10000, thus
2375                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376                    than zero */
2377                 m_fair_vn.vn_credit_delta =
2378                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2379                                                  (8 * bp->vn_weight_sum))),
2380                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382                    m_fair_vn.vn_credit_delta);
2383         }
2384
2385         /* Store it to internal memory */
2386         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389                        ((u32 *)(&m_rs_vn))[i]);
2390
2391         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394                        ((u32 *)(&m_fair_vn))[i]);
2395 }
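
/*
 * A quick sanity check of the quota formula above (a sketch, with rates
 * in Mbps as produced by the *100 scaling): Mbps / 8 gives bytes/usec,
 * so vn_max_rate * RS_PERIODIC_TIMEOUT_USEC / 8 is the number of bytes
 * this VN may transmit per rate-shaping period.
 */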
2396
2397
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2400 {
2401         /* Make sure that we are synced with the current statistics */
2402         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
2404         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2405
2406         if (bp->link_vars.link_up) {
2407
2408                 /* dropless flow control */
2409                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410                         int port = BP_PORT(bp);
2411                         u32 pause_enabled = 0;
2412
2413                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414                                 pause_enabled = 1;
2415
2416                         REG_WR(bp, BAR_USTRORM_INTMEM +
2417                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418                                pause_enabled);
2419                 }
2420
2421                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422                         struct host_port_stats *pstats;
2423
2424                         pstats = bnx2x_sp(bp, port_stats);
2425                         /* reset old bmac stats */
2426                         memset(&(pstats->mac_stx[0]), 0,
2427                                sizeof(struct mac_stx));
2428                 }
2429                 if (bp->state == BNX2X_STATE_OPEN)
2430                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431         }
2432
2433         /* indicate link status */
2434         bnx2x_link_report(bp);
2435
2436         if (IS_E1HMF(bp)) {
2437                 int port = BP_PORT(bp);
2438                 int func;
2439                 int vn;
2440
2441                 /* Set the attention towards other drivers on the same port */
2442                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443                         if (vn == BP_E1HVN(bp))
2444                                 continue;
2445
2446                         func = ((vn << 1) | port);
2447                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449                 }
2450
2451                 if (bp->link_vars.link_up) {
2452                         int i;
2453
2454                         /* Init rate shaping and fairness contexts */
2455                         bnx2x_init_port_minmax(bp);
2456
2457                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460                         /* Store it to internal memory */
2461                         for (i = 0;
2462                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465                                        ((u32 *)(&bp->cmng))[i]);
2466                 }
2467         }
2468 }
2469
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2471 {
2472         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473                 return;
2474
2475         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
2477         if (bp->link_vars.link_up)
2478                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479         else
2480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2482         bnx2x_calc_vn_weight_sum(bp);
2483
2484         /* indicate link status */
2485         bnx2x_link_report(bp);
2486 }
2487
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2489 {
2490         int port = BP_PORT(bp);
2491         u32 val;
2492
2493         bp->port.pmf = 1;
2494         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496         /* enable nig attention */
2497         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2500
2501         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2502 }
2503
2504 /* end of Link */
2505
2506 /* slow path */
2507
2508 /*
2509  * General service functions
2510  */
2511
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514 {
2515         int func = BP_FUNC(bp);
2516         u32 seq = ++bp->fw_seq;
2517         u32 rc = 0;
2518         u32 cnt = 1;
2519         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
2521         mutex_lock(&bp->fw_mb_mutex);
2522         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525         do {
2526                 /* let the FW do its magic ... */
2527                 msleep(delay);
2528
2529                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
2531                 /* Give the FW up to 5 seconds (500 * 10 ms) */
2532         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2533
2534         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535            cnt*delay, rc, seq);
2536
2537         /* is this a reply to our command? */
2538         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539                 rc &= FW_MSG_CODE_MASK;
2540         else {
2541                 /* FW BUG! */
2542                 BNX2X_ERR("FW failed to respond!\n");
2543                 bnx2x_fw_dump(bp);
2544                 rc = 0;
2545         }
2546         mutex_unlock(&bp->fw_mb_mutex);
2547
2548         return rc;
2549 }
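
/*
 * Typical usage (see bnx2x_dcc_event() below): send one of the
 * DRV_MSG_CODE_* commands and inspect the FW_MSG_CODE_* reply, e.g.
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * where a return value of 0 means the MCP never answered.
 */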
2550
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558
2559         netif_tx_disable(bp->dev);
2560
2561         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2563         netif_carrier_off(bp->dev);
2564 }
2565
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2567 {
2568         int port = BP_PORT(bp);
2569
2570         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2572         /* Tx queues should only be re-enabled */
2573         netif_tx_wake_all_queues(bp->dev);
2574
2575         /*
2576          * Do not call netif_carrier_on here; it will be called during the
2577          * link state check if the link is up
2578          */
2579 }
2580
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2582 {
2583         int port = BP_PORT(bp);
2584         int vn, i;
2585
2586         /* Init rate shaping and fairness contexts */
2587         bnx2x_init_port_minmax(bp);
2588
2589         bnx2x_calc_vn_weight_sum(bp);
2590
2591         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594         if (bp->port.pmf) {
2595                 int func;
2596
2597                 /* Set the attention towards other drivers on the same port */
2598                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599                         if (vn == BP_E1HVN(bp))
2600                                 continue;
2601
2602                         func = ((vn << 1) | port);
2603                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605                 }
2606
2607                 /* Store it to internal memory */
2608                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2610                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611                                ((u32 *)(&bp->cmng))[i]);
2612         }
2613 }
2614
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616 {
2617         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2618
2619         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
2621                 /*
2622                  * This is the only place besides the function initialization
2623                  * where the bp->flags can change so it is done without any
2624                  * locks
2625                  */
2626                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628                         bp->flags |= MF_FUNC_DIS;
2629
2630                         bnx2x_e1h_disable(bp);
2631                 } else {
2632                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633                         bp->flags &= ~MF_FUNC_DIS;
2634
2635                         bnx2x_e1h_enable(bp);
2636                 }
2637                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638         }
2639         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641                 bnx2x_update_min_max(bp);
2642                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643         }
2644
2645         /* Report results to MCP */
2646         if (dcc_event)
2647                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648         else
2649                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650 }
2651
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654 {
2655         struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657         if (bp->spq_prod_bd == bp->spq_last_bd) {
2658                 bp->spq_prod_bd = bp->spq;
2659                 bp->spq_prod_idx = 0;
2660                 DP(NETIF_MSG_TIMER, "end of spq\n");
2661         } else {
2662                 bp->spq_prod_bd++;
2663                 bp->spq_prod_idx++;
2664         }
2665         return next_spe;
2666 }
2667
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670 {
2671         int func = BP_FUNC(bp);
2672
2673         /* Make sure that BD data is updated before writing the producer */
2674         wmb();
2675
2676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677                bp->spq_prod_idx);
2678         mmiowb();
2679 }
2680
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683                          u32 data_hi, u32 data_lo, int common)
2684 {
2685         struct eth_spe *spe;
2686
2687         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2689            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693 #ifdef BNX2X_STOP_ON_ERROR
2694         if (unlikely(bp->panic))
2695                 return -EIO;
2696 #endif
2697
2698         spin_lock_bh(&bp->spq_lock);
2699
2700         if (!bp->spq_left) {
2701                 BNX2X_ERR("BUG! SPQ ring full!\n");
2702                 spin_unlock_bh(&bp->spq_lock);
2703                 bnx2x_panic();
2704                 return -EBUSY;
2705         }
2706
2707         spe = bnx2x_sp_get_next(bp);
2708
2709         /* The CID needs the port number encoded into it */
2710         spe->hdr.conn_and_cmd_data =
2711                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712                                      HW_CID(bp, cid)));
2713         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2714         if (common)
2715                 spe->hdr.type |=
2716                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
2718         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2720
2721         bp->spq_left--;
2722
2723         bnx2x_sp_prod_update(bp);
2724         spin_unlock_bh(&bp->spq_lock);
2725         return 0;
2726 }
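
/*
 * Illustrative ramrod post (a sketch; command IDs and argument layout as
 * used elsewhere in this driver):
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, cid, 0, 0, 1);
 *
 * The completion does not come back on the SPQ itself; it arrives on the
 * fastpath completion ring and reaches bnx2x_sp_event().
 */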
2727
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2730 {
2731         u32 i, j, val;
2732         int rc = 0;
2733
2734         might_sleep();
2735         i = 100;
2736         for (j = 0; j < i*10; j++) {
2737                 val = (1UL << 31);
2738                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740                 if (val & (1L << 31))
2741                         break;
2742
2743                 msleep(5);
2744         }
2745         if (!(val & (1L << 31))) {
2746                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2747                 rc = -EBUSY;
2748         }
2749
2750         return rc;
2751 }
2752
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2755 {
2756         u32 val = 0;
2757
2758         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759 }
2760
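/*
 * Returns a bitmask of which default status block indices have advanced:
 * 1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM.
 */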
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762 {
2763         struct host_def_status_block *def_sb = bp->def_status_blk;
2764         u16 rc = 0;
2765
2766         barrier(); /* status block is written to by the chip */
2767         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769                 rc |= 1;
2770         }
2771         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773                 rc |= 2;
2774         }
2775         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777                 rc |= 4;
2778         }
2779         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781                 rc |= 8;
2782         }
2783         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785                 rc |= 16;
2786         }
2787         return rc;
2788 }
2789
2790 /*
2791  * slow path service functions
2792  */
2793
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795 {
2796         int port = BP_PORT(bp);
2797         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798                        COMMAND_REG_ATTN_BITS_SET);
2799         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802                                        NIG_REG_MASK_INTERRUPT_PORT0;
2803         u32 aeu_mask;
2804         u32 nig_mask = 0;
2805
2806         if (bp->attn_state & asserted)
2807                 BNX2X_ERR("IGU ERROR\n");
2808
2809         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810         aeu_mask = REG_RD(bp, aeu_addr);
2811
2812         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2813            aeu_mask, asserted);
2814         aeu_mask &= ~(asserted & 0xff);
2815         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2816
2817         REG_WR(bp, aeu_addr, aeu_mask);
2818         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819
2820         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821         bp->attn_state |= asserted;
2822         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2823
2824         if (asserted & ATTN_HARD_WIRED_MASK) {
2825                 if (asserted & ATTN_NIG_FOR_FUNC) {
2826
2827                         bnx2x_acquire_phy_lock(bp);
2828
2829                         /* save nig interrupt mask */
2830                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2831                         REG_WR(bp, nig_int_mask_addr, 0);
2832
2833                         bnx2x_link_attn(bp);
2834
2835                         /* handle unicore attn? */
2836                 }
2837                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840                 if (asserted & GPIO_2_FUNC)
2841                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843                 if (asserted & GPIO_3_FUNC)
2844                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846                 if (asserted & GPIO_4_FUNC)
2847                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849                 if (port == 0) {
2850                         if (asserted & ATTN_GENERAL_ATTN_1) {
2851                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853                         }
2854                         if (asserted & ATTN_GENERAL_ATTN_2) {
2855                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857                         }
2858                         if (asserted & ATTN_GENERAL_ATTN_3) {
2859                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861                         }
2862                 } else {
2863                         if (asserted & ATTN_GENERAL_ATTN_4) {
2864                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866                         }
2867                         if (asserted & ATTN_GENERAL_ATTN_5) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_6) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874                         }
2875                 }
2876
2877         } /* if hardwired */
2878
2879         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880            asserted, hc_addr);
2881         REG_WR(bp, hc_addr, asserted);
2882
2883         /* now set back the mask */
2884         if (asserted & ATTN_NIG_FOR_FUNC) {
2885                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886                 bnx2x_release_phy_lock(bp);
2887         }
2888 }
2889
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891 {
2892         int port = BP_PORT(bp);
2893
2894         /* mark the failure */
2895         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898                  bp->link_params.ext_phy_config);
2899
2900         /* log the failure */
2901         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2902                    "Please contact Dell Support for assistance.\n");
2903 }
2904
2905 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2906 {
2907         int port = BP_PORT(bp);
2908         int reg_offset;
2909         u32 val, swap_val, swap_override;
2910
2911         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2912                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2913
2914         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2915
2916                 val = REG_RD(bp, reg_offset);
2917                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2918                 REG_WR(bp, reg_offset, val);
2919
2920                 BNX2X_ERR("SPIO5 hw attention\n");
2921
2922                 /* Fan failure attention */
2923                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2924                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2925                         /* Low power mode is controlled by GPIO 2 */
2926                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2927                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2928                         /* The PHY reset is controlled by GPIO 1 */
2929                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2930                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2931                         break;
2932
2933                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2934                         /* The PHY reset is controlled by GPIO 1 */
2935                         /* fake the port number to cancel the swap done in
2936                            set_gpio() */
2937                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2938                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2939                         port = (swap_val && swap_override) ^ 1;
2940                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2941                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2942                         break;
2943
2944                 default:
2945                         break;
2946                 }
2947                 bnx2x_fan_failure(bp);
2948         }
2949
2950         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2951                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2952                 bnx2x_acquire_phy_lock(bp);
2953                 bnx2x_handle_module_detect_int(&bp->link_params);
2954                 bnx2x_release_phy_lock(bp);
2955         }
2956
2957         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2958
2959                 val = REG_RD(bp, reg_offset);
2960                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2961                 REG_WR(bp, reg_offset, val);
2962
2963                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2964                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2965                 bnx2x_panic();
2966         }
2967 }
2968
2969 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2970 {
2971         u32 val;
2972
2973         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2974
2975                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2976                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2977                 /* DORQ discard attention */
2978                 if (val & 0x2)
2979                         BNX2X_ERR("FATAL error from DORQ\n");
2980         }
2981
2982         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2983
2984                 int port = BP_PORT(bp);
2985                 int reg_offset;
2986
2987                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2988                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2989
2990                 val = REG_RD(bp, reg_offset);
2991                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2992                 REG_WR(bp, reg_offset, val);
2993
2994                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2995                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2996                 bnx2x_panic();
2997         }
2998 }
2999
3000 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3001 {
3002         u32 val;
3003
3004         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3005
3006                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3007                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3008                 /* CFC error attention */
3009                 if (val & 0x2)
3010                         BNX2X_ERR("FATAL error from CFC\n");
3011         }
3012
3013         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3014
3015                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3016                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3017                 /* RQ_USDMDP_FIFO_OVERFLOW */
3018                 if (val & 0x18000)
3019                         BNX2X_ERR("FATAL error from PXP\n");
3020         }
3021
3022         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3023
3024                 int port = BP_PORT(bp);
3025                 int reg_offset;
3026
3027                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3028                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3029
3030                 val = REG_RD(bp, reg_offset);
3031                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3032                 REG_WR(bp, reg_offset, val);
3033
3034                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3035                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3036                 bnx2x_panic();
3037         }
3038 }
3039
3040 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3041 {
3042         u32 val;
3043
3044         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3045
3046                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3047                         int func = BP_FUNC(bp);
3048
3049                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3050                         bp->mf_config = SHMEM_RD(bp,
3051                                            mf_cfg.func_mf_config[func].config);
3052                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3053                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3054                                 bnx2x_dcc_event(bp,
3055                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3056                         bnx2x__link_status_update(bp);
3057                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3058                                 bnx2x_pmf_update(bp);
3059
3060                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3061
3062                         BNX2X_ERR("MC assert!\n");
3063                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3064                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3065                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3067                         bnx2x_panic();
3068
3069                 } else if (attn & BNX2X_MCP_ASSERT) {
3070
3071                         BNX2X_ERR("MCP assert!\n");
3072                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3073                         bnx2x_fw_dump(bp);
3074
3075                 } else
3076                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3077         }
3078
3079         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3080                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3081                 if (attn & BNX2X_GRC_TIMEOUT) {
3082                         val = CHIP_IS_E1H(bp) ?
3083                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3084                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3085                 }
3086                 if (attn & BNX2X_GRC_RSV) {
3087                         val = CHIP_IS_E1H(bp) ?
3088                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3089                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3090                 }
3091                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3092         }
3093 }
3094
3095 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3096 {
3097         struct attn_route attn;
3098         struct attn_route group_mask;
3099         int port = BP_PORT(bp);
3100         int index;
3101         u32 reg_addr;
3102         u32 val;
3103         u32 aeu_mask;
3104
3105         /* need to take HW lock because MCP or other port might also
3106            try to handle this event */
3107         bnx2x_acquire_alr(bp);
3108
3109         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3110         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3111         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3112         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3113         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3114            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3115
3116         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3117                 if (deasserted & (1 << index)) {
3118                         group_mask = bp->attn_group[index];
3119
3120                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3121                            index, group_mask.sig[0], group_mask.sig[1],
3122                            group_mask.sig[2], group_mask.sig[3]);
3123
3124                         bnx2x_attn_int_deasserted3(bp,
3125                                         attn.sig[3] & group_mask.sig[3]);
3126                         bnx2x_attn_int_deasserted1(bp,
3127                                         attn.sig[1] & group_mask.sig[1]);
3128                         bnx2x_attn_int_deasserted2(bp,
3129                                         attn.sig[2] & group_mask.sig[2]);
3130                         bnx2x_attn_int_deasserted0(bp,
3131                                         attn.sig[0] & group_mask.sig[0]);
3132
3133                         if ((attn.sig[0] & group_mask.sig[0] &
3134                                                 HW_PRTY_ASSERT_SET_0) ||
3135                             (attn.sig[1] & group_mask.sig[1] &
3136                                                 HW_PRTY_ASSERT_SET_1) ||
3137                             (attn.sig[2] & group_mask.sig[2] &
3138                                                 HW_PRTY_ASSERT_SET_2))
3139                                 BNX2X_ERR("FATAL HW block parity attention\n");
3140                 }
3141         }
3142
3143         bnx2x_release_alr(bp);
3144
3145         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3146
3147         val = ~deasserted;
3148         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3149            val, reg_addr);
3150         REG_WR(bp, reg_addr, val);
3151
3152         if (~bp->attn_state & deasserted)
3153                 BNX2X_ERR("IGU ERROR\n");
3154
3155         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3156                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3157
3158         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3159         aeu_mask = REG_RD(bp, reg_addr);
3160
3161         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3162            aeu_mask, deasserted);
3163         aeu_mask |= (deasserted & 0xff);
3164         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3165
3166         REG_WR(bp, reg_addr, aeu_mask);
3167         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3168
3169         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3170         bp->attn_state &= ~deasserted;
3171         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3172 }
3173
3174 static void bnx2x_attn_int(struct bnx2x *bp)
3175 {
3176         /* read local copy of bits */
3177         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3178                                                                 attn_bits);
3179         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180                                                                 attn_bits_ack);
3181         u32 attn_state = bp->attn_state;
3182
3183         /* look for changed bits */
3184         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3185         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
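             /* e.g. a line that was quiet (state 0, ack 0) and is now set in
              * attn_bits lands in 'asserted'; a line that was being handled
              * (state 1, ack 1) and has dropped out of attn_bits lands in
              * 'deasserted'; lines where bits and ack agree are stable and
              * appear in neither mask */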
3186
3187         DP(NETIF_MSG_HW,
3188            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3189            attn_bits, attn_ack, asserted, deasserted);
3190
3191         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3192                 BNX2X_ERR("BAD attention state\n");
3193
3194         /* handle bits that were raised */
3195         if (asserted)
3196                 bnx2x_attn_int_asserted(bp, asserted);
3197
3198         if (deasserted)
3199                 bnx2x_attn_int_deasserted(bp, deasserted);
3200 }
3201
3202 static void bnx2x_sp_task(struct work_struct *work)
3203 {
3204         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3205         u16 status;
3206
3207
3208         /* Return here if interrupt is disabled */
3209         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3210                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3211                 return;
3212         }
3213
3214         status = bnx2x_update_dsb_idx(bp);
3215 /*      if (status == 0)                                     */
3216 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3217
3218         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3219
3220         /* HW attentions */
3221         if (status & 0x1)
3222                 bnx2x_attn_int(bp);
3223
3224         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3225                      IGU_INT_NOP, 1);
3226         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3227                      IGU_INT_NOP, 1);
3228         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3229                      IGU_INT_NOP, 1);
3230         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3231                      IGU_INT_NOP, 1);
3232         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3233                      IGU_INT_ENABLE, 1);
3234
3235 }
3236
3237 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3238 {
3239         struct net_device *dev = dev_instance;
3240         struct bnx2x *bp = netdev_priv(dev);
3241
3242         /* Return here if interrupt is disabled */
3243         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3244                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3245                 return IRQ_HANDLED;
3246         }
3247
3248         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3249
3250 #ifdef BNX2X_STOP_ON_ERROR
3251         if (unlikely(bp->panic))
3252                 return IRQ_HANDLED;
3253 #endif
3254
3255 #ifdef BCM_CNIC
3256         {
3257                 struct cnic_ops *c_ops;
3258
3259                 rcu_read_lock();
3260                 c_ops = rcu_dereference(bp->cnic_ops);
3261                 if (c_ops)
3262                         c_ops->cnic_handler(bp->cnic_data, NULL);
3263                 rcu_read_unlock();
3264         }
3265 #endif
3266         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3267
3268         return IRQ_HANDLED;
3269 }
3270
3271 /* end of slow path */
3272
3273 /* Statistics */
3274
3275 /****************************************************************************
3276 * Macros
3277 ****************************************************************************/
3278
3279 /* sum[hi:lo] += add[hi:lo] */
3280 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3281         do { \
3282                 s_lo += a_lo; \
3283                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3284         } while (0)
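
     /* example: 0x00000001ffffffff + 2 -> s_lo wraps to 0x00000001, the
      * (s_lo < a_lo) test sees the wrap and carries 1 into s_hi, giving
      * 0x0000000200000001 */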
3285
3286 /* difference = minuend - subtrahend */
3287 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3288         do { \
3289                 if (m_lo < s_lo) { \
3290                         /* underflow */ \
3291                         d_hi = m_hi - s_hi; \
3292                         if (d_hi > 0) { \
3293                                 /* we can 'loan' 1 */ \
3294                                 d_hi--; \
3295                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3296                         } else { \
3297                                 /* m_hi <= s_hi */ \
3298                                 d_hi = 0; \
3299                                 d_lo = 0; \
3300                         } \
3301                 } else { \
3302                         /* m_lo >= s_lo */ \
3303                         if (m_hi < s_hi) { \
3304                                 d_hi = 0; \
3305                                 d_lo = 0; \
3306                         } else { \
3307                                 /* m_hi >= s_hi */ \
3308                                 d_hi = m_hi - s_hi; \
3309                                 d_lo = m_lo - s_lo; \
3310                         } \
3311                 } \
3312         } while (0)
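
     /* note that DIFF_64 clamps to 0 rather than wrapping when the
      * subtrahend is ahead of the minuend, so a reset or stale counter
      * snapshot cannot turn into a huge bogus delta */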
3313
3314 #define UPDATE_STAT64(s, t) \
3315         do { \
3316                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3317                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3318                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3319                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3320                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3321                        pstats->mac_stx[1].t##_lo, diff.lo); \
3322         } while (0)
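
     /* mac_stx[0] holds the previous raw MAC snapshot and mac_stx[1] the
      * running total: each update takes the delta against the snapshot,
      * refreshes the snapshot, then folds the delta into the total */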
3323
3324 #define UPDATE_STAT64_NIG(s, t) \
3325         do { \
3326                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3327                         diff.lo, new->s##_lo, old->s##_lo); \
3328                 ADD_64(estats->t##_hi, diff.hi, \
3329                        estats->t##_lo, diff.lo); \
3330         } while (0)
3331
3332 /* sum[hi:lo] += add */
3333 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3334         do { \
3335                 s_lo += a; \
3336                 s_hi += (s_lo < a) ? 1 : 0; \
3337         } while (0)
3338
3339 #define UPDATE_EXTEND_STAT(s) \
3340         do { \
3341                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3342                               pstats->mac_stx[1].s##_lo, \
3343                               new->s); \
3344         } while (0)
3345
3346 #define UPDATE_EXTEND_TSTAT(s, t) \
3347         do { \
3348                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3349                 old_tclient->s = tclient->s; \
3350                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3351         } while (0)
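
     /* the u32 subtraction above is wrap-safe: even if the firmware counter
      * wrapped past the stored copy, the modulo-2^32 difference is still the
      * right delta as long as it wrapped at most once between updates (the
      * USTAT/XSTAT variants below rely on the same property) */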
3352
3353 #define UPDATE_EXTEND_USTAT(s, t) \
3354         do { \
3355                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3356                 old_uclient->s = uclient->s; \
3357                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3358         } while (0)
3359
3360 #define UPDATE_EXTEND_XSTAT(s, t) \
3361         do { \
3362                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3363                 old_xclient->s = xclient->s; \
3364                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3365         } while (0)
3366
3367 /* minuend -= subtrahend */
3368 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3369         do { \
3370                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3371         } while (0)
3372
3373 /* minuend[hi:lo] -= subtrahend */
3374 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3375         do { \
3376                 SUB_64(m_hi, 0, m_lo, s); \
3377         } while (0)
3378
3379 #define SUB_EXTEND_USTAT(s, t) \
3380         do { \
3381                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3382                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3383         } while (0)
3384
3385 /*
3386  * General service functions
3387  */
3388
3389 static inline long bnx2x_hilo(u32 *hiref)
3390 {
3391         u32 lo = *(hiref + 1);
3392 #if (BITS_PER_LONG == 64)
3393         u32 hi = *hiref;
3394
3395         return HILO_U64(hi, lo);
3396 #else
3397         return lo;
3398 #endif
3399 }
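
     /* on 32-bit kernels 'long' cannot hold the full 64-bit counter, so
      * only the low 32 bits are reported; 64-bit kernels get the combined
      * value via HILO_U64() */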
3400
3401 /*
3402  * Init service functions
3403  */
3404
3405 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3406 {
3407         if (!bp->stats_pending) {
3408                 struct eth_query_ramrod_data ramrod_data = {0};
3409                 int i, rc;
3410
3411                 ramrod_data.drv_counter = bp->stats_counter++;
3412                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3413                 for_each_queue(bp, i)
3414                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3415
3416                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3417                                    ((u32 *)&ramrod_data)[1],
3418                                    ((u32 *)&ramrod_data)[0], 0);
3419                 if (rc == 0) {
3420                         /* stats ramrod has its own slot on the spq */
3421                         bp->spq_left++;
3422                         bp->stats_pending = 1;
3423                 }
3424         }
3425 }
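
     /* the drv_counter stamped into the query ramrod comes back in each
      * storm's per-client stats_counter field; bnx2x_storm_stats_update()
      * checks that every snapshot carries the counter of the most recent
      * query and bails out on stale data */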
3426
3427 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3428 {
3429         struct dmae_command *dmae = &bp->stats_dmae;
3430         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431
3432         *stats_comp = DMAE_COMP_VAL;
3433         if (CHIP_REV_IS_SLOW(bp))
3434                 return;
3435
3436         /* loader */
3437         if (bp->executer_idx) {
3438                 int loader_idx = PMF_DMAE_C(bp);
3439
3440                 memset(dmae, 0, sizeof(struct dmae_command));
3441
3442                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3443                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3444                                 DMAE_CMD_DST_RESET |
3445 #ifdef __BIG_ENDIAN
3446                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3447 #else
3448                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3449 #endif
3450                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3451                                                DMAE_CMD_PORT_0) |
3452                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3453                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3454                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3455                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3456                                      sizeof(struct dmae_command) *
3457                                      (loader_idx + 1)) >> 2;
3458                 dmae->dst_addr_hi = 0;
3459                 dmae->len = sizeof(struct dmae_command) >> 2;
3460                 if (CHIP_IS_E1(bp))
3461                         dmae->len--;
3462                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3463                 dmae->comp_addr_hi = 0;
3464                 dmae->comp_val = 1;
3465
3466                 *stats_comp = 0;
3467                 bnx2x_post_dmae(bp, dmae, loader_idx);
3468
3469         } else if (bp->func_stx) {
3470                 *stats_comp = 0;
3471                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3472         }
3473 }
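
     /* when several commands have been queued (bp->executer_idx != 0) the
      * loader built above DMAs a command image from the host-side dmae[]
      * array into the device command memory at slot loader_idx + 1 and
      * starts it through the completion write to that slot's GO register;
      * the queued commands themselves complete by re-kicking the loader
      * (see how bnx2x_port_stats_init() sets comp_addr to
      * dmae_reg_go_c[loader_idx]), and only the last command reports back
      * to the host via stats_comp */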
3474
3475 static int bnx2x_stats_comp(struct bnx2x *bp)
3476 {
3477         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3478         int cnt = 10;
3479
3480         might_sleep();
3481         while (*stats_comp != DMAE_COMP_VAL) {
3482                 if (!cnt) {
3483                         BNX2X_ERR("timeout waiting for stats to finish\n");
3484                         break;
3485                 }
3486                 cnt--;
3487                 msleep(1);
3488         }
3489         return 1;
3490 }
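
     /* the wait above polls for at most ~10 ms (10 x msleep(1)) for the
      * final DMAE completion value; note the function always returns 1,
      * timeout or not */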
3491
3492 /*
3493  * Statistics service functions
3494  */
3495
3496 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3497 {
3498         struct dmae_command *dmae;
3499         u32 opcode;
3500         int loader_idx = PMF_DMAE_C(bp);
3501         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3502
3503         /* sanity */
3504         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3505                 BNX2X_ERR("BUG!\n");
3506                 return;
3507         }
3508
3509         bp->executer_idx = 0;
3510
3511         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3512                   DMAE_CMD_C_ENABLE |
3513                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3514 #ifdef __BIG_ENDIAN
3515                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3516 #else
3517                   DMAE_CMD_ENDIANITY_DW_SWAP |
3518 #endif
3519                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3520                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3521
3522         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3523         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3524         dmae->src_addr_lo = bp->port.port_stx >> 2;
3525         dmae->src_addr_hi = 0;
3526         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3527         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3528         dmae->len = DMAE_LEN32_RD_MAX;
3529         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3530         dmae->comp_addr_hi = 0;
3531         dmae->comp_val = 1;
3532
3533         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3534         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3535         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3536         dmae->src_addr_hi = 0;
3537         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3538                                    DMAE_LEN32_RD_MAX * 4);
3539         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3540                                    DMAE_LEN32_RD_MAX * 4);
3541         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3542         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3543         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3544         dmae->comp_val = DMAE_COMP_VAL;
3545
3546         *stats_comp = 0;
3547         bnx2x_hw_stats_post(bp);
3548         bnx2x_stats_comp(bp);
3549 }
3550
3551 static void bnx2x_port_stats_init(struct bnx2x *bp)
3552 {
3553         struct dmae_command *dmae;
3554         int port = BP_PORT(bp);
3555         int vn = BP_E1HVN(bp);
3556         u32 opcode;
3557         int loader_idx = PMF_DMAE_C(bp);
3558         u32 mac_addr;
3559         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3560
3561         /* sanity */
3562         if (!bp->link_vars.link_up || !bp->port.pmf) {
3563                 BNX2X_ERR("BUG!\n");
3564                 return;
3565         }
3566
3567         bp->executer_idx = 0;
3568
3569         /* MCP */
3570         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3571                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3572                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3573 #ifdef __BIG_ENDIAN
3574                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3575 #else
3576                   DMAE_CMD_ENDIANITY_DW_SWAP |
3577 #endif
3578                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3579                   (vn << DMAE_CMD_E1HVN_SHIFT));
3580
3581         if (bp->port.port_stx) {
3582
3583                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3584                 dmae->opcode = opcode;
3585                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3586                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3587                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3588                 dmae->dst_addr_hi = 0;
3589                 dmae->len = sizeof(struct host_port_stats) >> 2;
3590                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3591                 dmae->comp_addr_hi = 0;
3592                 dmae->comp_val = 1;
3593         }
3594
3595         if (bp->func_stx) {
3596
3597                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3598                 dmae->opcode = opcode;
3599                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3600                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3601                 dmae->dst_addr_lo = bp->func_stx >> 2;
3602                 dmae->dst_addr_hi = 0;
3603                 dmae->len = sizeof(struct host_func_stats) >> 2;
3604                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3605                 dmae->comp_addr_hi = 0;
3606                 dmae->comp_val = 1;
3607         }
3608
3609         /* MAC */
3610         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3611                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3612                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3613 #ifdef __BIG_ENDIAN
3614                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3615 #else
3616                   DMAE_CMD_ENDIANITY_DW_SWAP |
3617 #endif
3618                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3619                   (vn << DMAE_CMD_E1HVN_SHIFT));
3620
3621         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3622
3623                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3624                                    NIG_REG_INGRESS_BMAC0_MEM);
3625
3626                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3627                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3628                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3629                 dmae->opcode = opcode;
3630                 dmae->src_addr_lo = (mac_addr +
3631                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3632                 dmae->src_addr_hi = 0;
3633                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3634                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3635                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3636                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3637                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3638                 dmae->comp_addr_hi = 0;
3639                 dmae->comp_val = 1;
3640
3641                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3642                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3643                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3644                 dmae->opcode = opcode;
3645                 dmae->src_addr_lo = (mac_addr +
3646                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3647                 dmae->src_addr_hi = 0;
3648                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3649                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3650                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3651                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3653                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3654                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3655                 dmae->comp_addr_hi = 0;
3656                 dmae->comp_val = 1;
3657
3658         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3659
3660                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3661
3662                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3663                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3664                 dmae->opcode = opcode;
3665                 dmae->src_addr_lo = (mac_addr +
3666                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3667                 dmae->src_addr_hi = 0;
3668                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3669                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3670                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3671                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3672                 dmae->comp_addr_hi = 0;
3673                 dmae->comp_val = 1;
3674
3675                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3676                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3677                 dmae->opcode = opcode;
3678                 dmae->src_addr_lo = (mac_addr +
3679                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3680                 dmae->src_addr_hi = 0;
3681                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3682                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3683                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3684                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685                 dmae->len = 1;
3686                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3687                 dmae->comp_addr_hi = 0;
3688                 dmae->comp_val = 1;
3689
3690                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3691                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3692                 dmae->opcode = opcode;
3693                 dmae->src_addr_lo = (mac_addr +
3694                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3695                 dmae->src_addr_hi = 0;
3696                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3697                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3698                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3699                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3701                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3702                 dmae->comp_addr_hi = 0;
3703                 dmae->comp_val = 1;
3704         }
3705
3706         /* NIG */
3707         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3708         dmae->opcode = opcode;
3709         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3710                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3711         dmae->src_addr_hi = 0;
3712         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3713         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3714         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3715         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3716         dmae->comp_addr_hi = 0;
3717         dmae->comp_val = 1;
3718
3719         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3720         dmae->opcode = opcode;
3721         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3722                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3723         dmae->src_addr_hi = 0;
3724         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3725                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3726         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3727                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728         dmae->len = (2*sizeof(u32)) >> 2;
3729         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3730         dmae->comp_addr_hi = 0;
3731         dmae->comp_val = 1;
3732
3733         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3734         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3735                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3736                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3737 #ifdef __BIG_ENDIAN
3738                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3739 #else
3740                         DMAE_CMD_ENDIANITY_DW_SWAP |
3741 #endif
3742                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3743                         (vn << DMAE_CMD_E1HVN_SHIFT));
3744         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3745                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3746         dmae->src_addr_hi = 0;
3747         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3748                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3749         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3750                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751         dmae->len = (2*sizeof(u32)) >> 2;
3752         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3753         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3754         dmae->comp_val = DMAE_COMP_VAL;
3755
3756         *stats_comp = 0;
3757 }
3758
3759 static void bnx2x_func_stats_init(struct bnx2x *bp)
3760 {
3761         struct dmae_command *dmae = &bp->stats_dmae;
3762         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3763
3764         /* sanity */
3765         if (!bp->func_stx) {
3766                 BNX2X_ERR("BUG!\n");
3767                 return;
3768         }
3769
3770         bp->executer_idx = 0;
3771         memset(dmae, 0, sizeof(struct dmae_command));
3772
3773         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3774                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3775                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3776 #ifdef __BIG_ENDIAN
3777                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3778 #else
3779                         DMAE_CMD_ENDIANITY_DW_SWAP |
3780 #endif
3781                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3782                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3783         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3784         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3785         dmae->dst_addr_lo = bp->func_stx >> 2;
3786         dmae->dst_addr_hi = 0;
3787         dmae->len = sizeof(struct host_func_stats) >> 2;
3788         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3789         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3790         dmae->comp_val = DMAE_COMP_VAL;
3791
3792         *stats_comp = 0;
3793 }
3794
3795 static void bnx2x_stats_start(struct bnx2x *bp)
3796 {
3797         if (bp->port.pmf)
3798                 bnx2x_port_stats_init(bp);
3799
3800         else if (bp->func_stx)
3801                 bnx2x_func_stats_init(bp);
3802
3803         bnx2x_hw_stats_post(bp);
3804         bnx2x_storm_stats_post(bp);
3805 }
3806
3807 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3808 {
3809         bnx2x_stats_comp(bp);
3810         bnx2x_stats_pmf_update(bp);
3811         bnx2x_stats_start(bp);
3812 }
3813
3814 static void bnx2x_stats_restart(struct bnx2x *bp)
3815 {
3816         bnx2x_stats_comp(bp);
3817         bnx2x_stats_start(bp);
3818 }
3819
3820 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3821 {
3822         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3823         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3824         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3825         struct {
3826                 u32 lo;
3827                 u32 hi;
3828         } diff;
3829
3830         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3831         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3832         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3833         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3834         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3835         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3836         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3837         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3838         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3839         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3840         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3841         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3842         UPDATE_STAT64(tx_stat_gt127,
3843                                 tx_stat_etherstatspkts65octetsto127octets);
3844         UPDATE_STAT64(tx_stat_gt255,
3845                                 tx_stat_etherstatspkts128octetsto255octets);
3846         UPDATE_STAT64(tx_stat_gt511,
3847                                 tx_stat_etherstatspkts256octetsto511octets);
3848         UPDATE_STAT64(tx_stat_gt1023,
3849                                 tx_stat_etherstatspkts512octetsto1023octets);
3850         UPDATE_STAT64(tx_stat_gt1518,
3851                                 tx_stat_etherstatspkts1024octetsto1522octets);
3852         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3853         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3854         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3855         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3856         UPDATE_STAT64(tx_stat_gterr,
3857                                 tx_stat_dot3statsinternalmactransmiterrors);
3858         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3859
3860         estats->pause_frames_received_hi =
3861                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3862         estats->pause_frames_received_lo =
3863                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3864
3865         estats->pause_frames_sent_hi =
3866                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3867         estats->pause_frames_sent_lo =
3868                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3869 }
3870
3871 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3872 {
3873         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3874         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3875         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3876
3877         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3878         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3879         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3880         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3881         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3882         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3883         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3884         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3885         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3886         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3887         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3888         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3889         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3890         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3891         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3892         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3893         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3894         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3895         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3896         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3897         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3898         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3899         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3900         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3901         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3902         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3903         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3904         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3905         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3907         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3908
3909         estats->pause_frames_received_hi =
3910                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3911         estats->pause_frames_received_lo =
3912                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3913         ADD_64(estats->pause_frames_received_hi,
3914                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3915                estats->pause_frames_received_lo,
3916                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3917
3918         estats->pause_frames_sent_hi =
3919                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3920         estats->pause_frames_sent_lo =
3921                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3922         ADD_64(estats->pause_frames_sent_hi,
3923                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3924                estats->pause_frames_sent_lo,
3925                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3926 }
3927
3928 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3929 {
3930         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3931         struct nig_stats *old = &(bp->port.old_nig_stats);
3932         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3933         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934         struct {
3935                 u32 lo;
3936                 u32 hi;
3937         } diff;
3938         u32 nig_timer_max;
3939
3940         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3941                 bnx2x_bmac_stats_update(bp);
3942
3943         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3944                 bnx2x_emac_stats_update(bp);
3945
3946         else { /* unreached */
3947                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3948                 return -1;
3949         }
3950
3951         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3952                       new->brb_discard - old->brb_discard);
3953         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3954                       new->brb_truncate - old->brb_truncate);
3955
3956         UPDATE_STAT64_NIG(egress_mac_pkt0,
3957                                         etherstatspkts1024octetsto1522octets);
3958         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3959
3960         memcpy(old, new, sizeof(struct nig_stats));
3961
3962         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3963                sizeof(struct mac_stx));
3964         estats->brb_drop_hi = pstats->brb_drop_hi;
3965         estats->brb_drop_lo = pstats->brb_drop_lo;
3966
3967         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3968
3969         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3970         if (nig_timer_max != estats->nig_timer_max) {
3971                 estats->nig_timer_max = nig_timer_max;
3972                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3973         }
3974
3975         return 0;
3976 }
3977
3978 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3979 {
3980         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3981         struct tstorm_per_port_stats *tport =
3982                                         &stats->tstorm_common.port_statistics;
3983         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3984         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3985         int i;
3986
3987         memcpy(&(fstats->total_bytes_received_hi),
3988                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3989                sizeof(struct host_func_stats) - 2*sizeof(u32));
3990         estats->error_bytes_received_hi = 0;
3991         estats->error_bytes_received_lo = 0;
3992         estats->etherstatsoverrsizepkts_hi = 0;
3993         estats->etherstatsoverrsizepkts_lo = 0;
3994         estats->no_buff_discard_hi = 0;
3995         estats->no_buff_discard_lo = 0;
3996
3997         for_each_queue(bp, i) {
3998                 struct bnx2x_fastpath *fp = &bp->fp[i];
3999                 int cl_id = fp->cl_id;
4000                 struct tstorm_per_client_stats *tclient =
4001                                 &stats->tstorm_common.client_statistics[cl_id];
4002                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4003                 struct ustorm_per_client_stats *uclient =
4004                                 &stats->ustorm_common.client_statistics[cl_id];
4005                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4006                 struct xstorm_per_client_stats *xclient =
4007                                 &stats->xstorm_common.client_statistics[cl_id];
4008                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4009                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4010                 u32 diff;
4011
4012                 /* are storm stats valid? */
4013                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4014                                                         bp->stats_counter) {
4015                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4016                            "  xstorm counter (%d) != stats_counter (%d)\n",
4017                            i, xclient->stats_counter, bp->stats_counter);
4018                         return -1;
4019                 }
4020                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4021                                                         bp->stats_counter) {
4022                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4023                            "  tstorm counter (%d) != stats_counter (%d)\n",
4024                            i, tclient->stats_counter, bp->stats_counter);
4025                         return -2;
4026                 }
4027                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4028                                                         bp->stats_counter) {
4029                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4030                            "  ustorm counter (%d) != stats_counter (%d)\n",
4031                            i, uclient->stats_counter, bp->stats_counter);
4032                         return -4;
4033                 }
4034
4035                 qstats->total_bytes_received_hi =
4036                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4037                 qstats->total_bytes_received_lo =
4038                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4039
4040                 ADD_64(qstats->total_bytes_received_hi,
4041                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4042                        qstats->total_bytes_received_lo,
4043                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4044
4045                 ADD_64(qstats->total_bytes_received_hi,
4046                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4047                        qstats->total_bytes_received_lo,
4048                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4049
4050                 qstats->valid_bytes_received_hi =
4051                                         qstats->total_bytes_received_hi;
4052                 qstats->valid_bytes_received_lo =
4053                                         qstats->total_bytes_received_lo;
4054
4055                 qstats->error_bytes_received_hi =
4056                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4057                 qstats->error_bytes_received_lo =
4058                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4059
4060                 ADD_64(qstats->total_bytes_received_hi,
4061                        qstats->error_bytes_received_hi,
4062                        qstats->total_bytes_received_lo,
4063                        qstats->error_bytes_received_lo);
4064
4065                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4066                                         total_unicast_packets_received);
4067                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4068                                         total_multicast_packets_received);
4069                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4070                                         total_broadcast_packets_received);
4071                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4072                                         etherstatsoverrsizepkts);
4073                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4074
4075                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4076                                         total_unicast_packets_received);
4077                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4078                                         total_multicast_packets_received);
4079                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4080                                         total_broadcast_packets_received);
4081                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4082                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4083                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
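                /*
                 * The UPDATE_EXTEND_*STAT()/SUB_EXTEND_USTAT() helpers fold
                 * the free-running 32-bit storm counters into monotonic
                 * 64-bit qstats fields: each takes a wrap-safe delta against
                 * the old_*client snapshot and extends it.  A minimal sketch
                 * of the tstorm flavor (the real macros live earlier in this
                 * file and use the local diff/tclient/old_tclient variables):
                 *
                 *      #define UPDATE_EXTEND_TSTAT(s, t) \
                 *              do { \
                 *                      diff = le32_to_cpu(tclient->s) - \
                 *                             le32_to_cpu(old_tclient->s); \
                 *                      old_tclient->s = tclient->s; \
                 *                      ADD_EXTEND_64(qstats->t##_hi, \
                 *                                    qstats->t##_lo, diff); \
                 *              } while (0)
                 *
                 * Unsigned 32-bit subtraction keeps the delta correct even
                 * when the hardware counter wraps between two samples.
                 */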
4084
4085                 qstats->total_bytes_transmitted_hi =
4086                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4087                 qstats->total_bytes_transmitted_lo =
4088                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4089
4090                 ADD_64(qstats->total_bytes_transmitted_hi,
4091                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4092                        qstats->total_bytes_transmitted_lo,
4093                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4094
4095                 ADD_64(qstats->total_bytes_transmitted_hi,
4096                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4097                        qstats->total_bytes_transmitted_lo,
4098                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4099
4100                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4101                                         total_unicast_packets_transmitted);
4102                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4103                                         total_multicast_packets_transmitted);
4104                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4105                                         total_broadcast_packets_transmitted);
4106
4107                 old_tclient->checksum_discard = tclient->checksum_discard;
4108                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4109
4110                 ADD_64(fstats->total_bytes_received_hi,
4111                        qstats->total_bytes_received_hi,
4112                        fstats->total_bytes_received_lo,
4113                        qstats->total_bytes_received_lo);
4114                 ADD_64(fstats->total_bytes_transmitted_hi,
4115                        qstats->total_bytes_transmitted_hi,
4116                        fstats->total_bytes_transmitted_lo,
4117                        qstats->total_bytes_transmitted_lo);
4118                 ADD_64(fstats->total_unicast_packets_received_hi,
4119                        qstats->total_unicast_packets_received_hi,
4120                        fstats->total_unicast_packets_received_lo,
4121                        qstats->total_unicast_packets_received_lo);
4122                 ADD_64(fstats->total_multicast_packets_received_hi,
4123                        qstats->total_multicast_packets_received_hi,
4124                        fstats->total_multicast_packets_received_lo,
4125                        qstats->total_multicast_packets_received_lo);
4126                 ADD_64(fstats->total_broadcast_packets_received_hi,
4127                        qstats->total_broadcast_packets_received_hi,
4128                        fstats->total_broadcast_packets_received_lo,
4129                        qstats->total_broadcast_packets_received_lo);
4130                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4131                        qstats->total_unicast_packets_transmitted_hi,
4132                        fstats->total_unicast_packets_transmitted_lo,
4133                        qstats->total_unicast_packets_transmitted_lo);
4134                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4135                        qstats->total_multicast_packets_transmitted_hi,
4136                        fstats->total_multicast_packets_transmitted_lo,
4137                        qstats->total_multicast_packets_transmitted_lo);
4138                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4139                        qstats->total_broadcast_packets_transmitted_hi,
4140                        fstats->total_broadcast_packets_transmitted_lo,
4141                        qstats->total_broadcast_packets_transmitted_lo);
4142                 ADD_64(fstats->valid_bytes_received_hi,
4143                        qstats->valid_bytes_received_hi,
4144                        fstats->valid_bytes_received_lo,
4145                        qstats->valid_bytes_received_lo);
4146
4147                 ADD_64(estats->error_bytes_received_hi,
4148                        qstats->error_bytes_received_hi,
4149                        estats->error_bytes_received_lo,
4150                        qstats->error_bytes_received_lo);
4151                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4152                        qstats->etherstatsoverrsizepkts_hi,
4153                        estats->etherstatsoverrsizepkts_lo,
4154                        qstats->etherstatsoverrsizepkts_lo);
4155                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4156                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4157         }
4158
4159         ADD_64(fstats->total_bytes_received_hi,
4160                estats->rx_stat_ifhcinbadoctets_hi,
4161                fstats->total_bytes_received_lo,
4162                estats->rx_stat_ifhcinbadoctets_lo);
4163
4164         memcpy(estats, &(fstats->total_bytes_received_hi),
4165                sizeof(struct host_func_stats) - 2*sizeof(u32));
4166
4167         ADD_64(estats->etherstatsoverrsizepkts_hi,
4168                estats->rx_stat_dot3statsframestoolong_hi,
4169                estats->etherstatsoverrsizepkts_lo,
4170                estats->rx_stat_dot3statsframestoolong_lo);
4171         ADD_64(estats->error_bytes_received_hi,
4172                estats->rx_stat_ifhcinbadoctets_hi,
4173                estats->error_bytes_received_lo,
4174                estats->rx_stat_ifhcinbadoctets_lo);
4175
4176         if (bp->port.pmf) {
4177                 estats->mac_filter_discard =
4178                                 le32_to_cpu(tport->mac_filter_discard);
4179                 estats->xxoverflow_discard =
4180                                 le32_to_cpu(tport->xxoverflow_discard);
4181                 estats->brb_truncate_discard =
4182                                 le32_to_cpu(tport->brb_truncate_discard);
4183                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4184         }
4185
4186         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4187
4188         bp->stats_pending = 0;
4189
4190         return 0;
4191 }
4192
4193 static void bnx2x_net_stats_update(struct bnx2x *bp)
4194 {
4195         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4196         struct net_device_stats *nstats = &bp->dev->stats;
4197         int i;
4198
4199         nstats->rx_packets =
4200                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4201                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4202                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4203
4204         nstats->tx_packets =
4205                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4206                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4207                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4208
4209         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4210
4211         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4212
4213         nstats->rx_dropped = estats->mac_discard;
4214         for_each_queue(bp, i)
4215                 nstats->rx_dropped +=
4216                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4217
4218         nstats->tx_dropped = 0;
4219
4220         nstats->multicast =
4221                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4222
4223         nstats->collisions =
4224                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4225
4226         nstats->rx_length_errors =
4227                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4228                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4229         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4230                                  bnx2x_hilo(&estats->brb_truncate_hi);
4231         nstats->rx_crc_errors =
4232                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4233         nstats->rx_frame_errors =
4234                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4235         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4236         nstats->rx_missed_errors = estats->xxoverflow_discard;
4237
4238         nstats->rx_errors = nstats->rx_length_errors +
4239                             nstats->rx_over_errors +
4240                             nstats->rx_crc_errors +
4241                             nstats->rx_frame_errors +
4242                             nstats->rx_fifo_errors +
4243                             nstats->rx_missed_errors;
4244
4245         nstats->tx_aborted_errors =
4246                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4247                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4248         nstats->tx_carrier_errors =
4249                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4250         nstats->tx_fifo_errors = 0;
4251         nstats->tx_heartbeat_errors = 0;
4252         nstats->tx_window_errors = 0;
4253
4254         nstats->tx_errors = nstats->tx_aborted_errors +
4255                             nstats->tx_carrier_errors +
4256             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4257 }
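/*
 * bnx2x_hilo() collapses a {hi, lo} pair of u32s into the unsigned long that
 * struct net_device_stats stores.  Roughly (sketch of the helper defined
 * earlier in this file):
 *
 *      static inline long bnx2x_hilo(u32 *hiref)
 *      {
 *              u32 lo = *(hiref + 1);
 *      #if (BITS_PER_LONG == 64)
 *              u32 hi = *hiref;
 *
 *              return HILO_U64(hi, lo);
 *      #else
 *              return lo;
 *      #endif
 *      }
 *
 * On 32-bit kernels only the low word is reported, matching the width of the
 * netdev counters there.
 */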
4258
4259 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4260 {
4261         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4262         int i;
4263
4264         estats->driver_xoff = 0;
4265         estats->rx_err_discard_pkt = 0;
4266         estats->rx_skb_alloc_failed = 0;
4267         estats->hw_csum_err = 0;
4268         for_each_queue(bp, i) {
4269                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4270
4271                 estats->driver_xoff += qstats->driver_xoff;
4272                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4273                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4274                 estats->hw_csum_err += qstats->hw_csum_err;
4275         }
4276 }
4277
4278 static void bnx2x_stats_update(struct bnx2x *bp)
4279 {
4280         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4281
4282         if (*stats_comp != DMAE_COMP_VAL)
4283                 return;
4284
4285         if (bp->port.pmf)
4286                 bnx2x_hw_stats_update(bp);
4287
4288         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4289                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4290                 bnx2x_panic();
4291                 return;
4292         }
4293
4294         bnx2x_net_stats_update(bp);
4295         bnx2x_drv_stats_update(bp);
4296
4297         if (netif_msg_timer(bp)) {
4298                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4299                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4300                 struct tstorm_per_client_stats *old_tclient =
4301                                                         &bp->fp->old_tclient;
4302                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4303                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4304                 struct net_device_stats *nstats = &bp->dev->stats;
4305                 int i;
4306
4307                 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4308                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4309                                   "  tx pkt (%lx)\n",
4310                        bnx2x_tx_avail(fp0_tx),
4311                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4312                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4313                                   "  rx pkt (%lx)\n",
4314                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4315                              fp0_rx->rx_comp_cons),
4316                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4317                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4318                                   "brb truncate %u\n",
4319                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4320                        qstats->driver_xoff,
4321                        estats->brb_drop_lo, estats->brb_truncate_lo);
4322                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4323                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4324                         "mac_discard %u  mac_filter_discard %u  "
4325                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4326                         "ttl0_discard %u\n",
4327                        le32_to_cpu(old_tclient->checksum_discard),
4328                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4329                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4330                        estats->mac_discard, estats->mac_filter_discard,
4331                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4332                        le32_to_cpu(old_tclient->ttl0_discard));
4333
4334                 for_each_queue(bp, i) {
4335                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4336                                bnx2x_fp(bp, i, tx_pkt),
4337                                bnx2x_fp(bp, i, rx_pkt),
4338                                bnx2x_fp(bp, i, rx_calls));
4339                 }
4340         }
4341
4342         bnx2x_hw_stats_post(bp);
4343         bnx2x_storm_stats_post(bp);
4344 }
4345
4346 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4347 {
4348         struct dmae_command *dmae;
4349         u32 opcode;
4350         int loader_idx = PMF_DMAE_C(bp);
4351         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4352
4353         bp->executer_idx = 0;
4354
4355         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4356                   DMAE_CMD_C_ENABLE |
4357                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4358 #ifdef __BIG_ENDIAN
4359                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4360 #else
4361                   DMAE_CMD_ENDIANITY_DW_SWAP |
4362 #endif
4363                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4364                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4365
4366         if (bp->port.port_stx) {
4367
4368                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4369                 if (bp->func_stx)
4370                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4371                 else
4372                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4373                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4374                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4375                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4376                 dmae->dst_addr_hi = 0;
4377                 dmae->len = sizeof(struct host_port_stats) >> 2;
4378                 if (bp->func_stx) {
4379                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4380                         dmae->comp_addr_hi = 0;
4381                         dmae->comp_val = 1;
4382                 } else {
4383                         dmae->comp_addr_lo =
4384                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4385                         dmae->comp_addr_hi =
4386                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4387                         dmae->comp_val = DMAE_COMP_VAL;
4388
4389                         *stats_comp = 0;
4390                 }
4391         }
4392
4393         if (bp->func_stx) {
4394
4395                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4396                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4397                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4398                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4399                 dmae->dst_addr_lo = bp->func_stx >> 2;
4400                 dmae->dst_addr_hi = 0;
4401                 dmae->len = sizeof(struct host_func_stats) >> 2;
4402                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4403                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4404                 dmae->comp_val = DMAE_COMP_VAL;
4405
4406                 *stats_comp = 0;
4407         }
4408 }
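/*
 * Note the completion chaining above: when both a port and a function stats
 * location exist, the port DMAE command completes into a DMAE "go" register
 * (comp_val = 1), which effectively triggers the next command, and only the
 * last command in the chain completes into stats_comp with DMAE_COMP_VAL.
 * bnx2x_stats_comp() later polls stats_comp to wait for the whole chain.
 */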
4409
4410 static void bnx2x_stats_stop(struct bnx2x *bp)
4411 {
4412         int update = 0;
4413
4414         bnx2x_stats_comp(bp);
4415
4416         if (bp->port.pmf)
4417                 update = (bnx2x_hw_stats_update(bp) == 0);
4418
4419         update |= (bnx2x_storm_stats_update(bp) == 0);
4420
4421         if (update) {
4422                 bnx2x_net_stats_update(bp);
4423
4424                 if (bp->port.pmf)
4425                         bnx2x_port_stats_stop(bp);
4426
4427                 bnx2x_hw_stats_post(bp);
4428                 bnx2x_stats_comp(bp);
4429         }
4430 }
4431
4432 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4433 {
4434 }
4435
4436 static const struct {
4437         void (*action)(struct bnx2x *bp);
4438         enum bnx2x_stats_state next_state;
4439 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4440 /* state        event   */
4441 {
4442 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4443 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4444 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4445 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4446 },
4447 {
4448 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4449 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4450 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4451 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4452 }
4453 };
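/*
 * The table above is indexed as bnx2x_stats_stm[state][event]; each slot
 * names the handler to run and the state to move to.  For example, a
 * link-up event while statistics are disabled starts them:
 *
 *      bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *              (DISABLED --LINK_UP--> ENABLED, via bnx2x_stats_start())
 *
 * Every slot is populated, so no state/event combination is left undefined.
 */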
4454
4455 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4456 {
4457         enum bnx2x_stats_state state = bp->stats_state;
4458
4459         bnx2x_stats_stm[state][event].action(bp);
4460         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4461
4462         /* Make sure the state change above is visible to other CPUs */
4463         smp_wmb();
4464
4465         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4466                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4467                    state, event, bp->stats_state);
4468 }
4469
4470 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4471 {
4472         struct dmae_command *dmae;
4473         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4474
4475         /* sanity */
4476         if (!bp->port.pmf || !bp->port.port_stx) {
4477                 BNX2X_ERR("BUG!\n");
4478                 return;
4479         }
4480
4481         bp->executer_idx = 0;
4482
4483         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4484         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4485                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4486                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4487 #ifdef __BIG_ENDIAN
4488                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4489 #else
4490                         DMAE_CMD_ENDIANITY_DW_SWAP |
4491 #endif
4492                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4493                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4494         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4495         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4496         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4497         dmae->dst_addr_hi = 0;
4498         dmae->len = sizeof(struct host_port_stats) >> 2;
4499         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4500         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4501         dmae->comp_val = DMAE_COMP_VAL;
4502
4503         *stats_comp = 0;
4504         bnx2x_hw_stats_post(bp);
4505         bnx2x_stats_comp(bp);
4506 }
4507
4508 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4509 {
4510         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4511         int port = BP_PORT(bp);
4512         int func;
4513         u32 func_stx;
4514
4515         /* sanity */
4516         if (!bp->port.pmf || !bp->func_stx) {
4517                 BNX2X_ERR("BUG!\n");
4518                 return;
4519         }
4520
4521         /* save our func_stx */
4522         func_stx = bp->func_stx;
4523
4524         for (vn = VN_0; vn < vn_max; vn++) {
4525                 func = 2*vn + port;
4526
4527                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4528                 bnx2x_func_stats_init(bp);
4529                 bnx2x_hw_stats_post(bp);
4530                 bnx2x_stats_comp(bp);
4531         }
4532
4533         /* restore our func_stx */
4534         bp->func_stx = func_stx;
4535 }
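/*
 * On E1H the function number interleaves vnic and port: func = 2*vn + port,
 * so port 0 owns functions 0/2/4/6 and port 1 owns 1/3/5/7.  The loop above
 * lets the PMF walk every vnic of its port and initialize that function's
 * statistics area from the fw_mb_param mailbox word in shared memory.
 */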
4536
4537 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4538 {
4539         struct dmae_command *dmae = &bp->stats_dmae;
4540         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4541
4542         /* sanity */
4543         if (!bp->func_stx) {
4544                 BNX2X_ERR("BUG!\n");
4545                 return;
4546         }
4547
4548         bp->executer_idx = 0;
4549         memset(dmae, 0, sizeof(struct dmae_command));
4550
4551         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4552                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4553                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4554 #ifdef __BIG_ENDIAN
4555                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4556 #else
4557                         DMAE_CMD_ENDIANITY_DW_SWAP |
4558 #endif
4559                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4560                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4561         dmae->src_addr_lo = bp->func_stx >> 2;
4562         dmae->src_addr_hi = 0;
4563         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4564         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4565         dmae->len = sizeof(struct host_func_stats) >> 2;
4566         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4567         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4568         dmae->comp_val = DMAE_COMP_VAL;
4569
4570         *stats_comp = 0;
4571         bnx2x_hw_stats_post(bp);
4572         bnx2x_stats_comp(bp);
4573 }
4574
4575 static void bnx2x_stats_init(struct bnx2x *bp)
4576 {
4577         int port = BP_PORT(bp);
4578         int func = BP_FUNC(bp);
4579         int i;
4580
4581         bp->stats_pending = 0;
4582         bp->executer_idx = 0;
4583         bp->stats_counter = 0;
4584
4585         /* port and func stats for management */
4586         if (!BP_NOMCP(bp)) {
4587                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4588                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4589
4590         } else {
4591                 bp->port.port_stx = 0;
4592                 bp->func_stx = 0;
4593         }
4594         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4595            bp->port.port_stx, bp->func_stx);
4596
4597         /* port stats */
4598         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4599         bp->port.old_nig_stats.brb_discard =
4600                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4601         bp->port.old_nig_stats.brb_truncate =
4602                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4603         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4604                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4605         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4606                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4607
4608         /* function stats */
4609         for_each_queue(bp, i) {
4610                 struct bnx2x_fastpath *fp = &bp->fp[i];
4611
4612                 memset(&fp->old_tclient, 0,
4613                        sizeof(struct tstorm_per_client_stats));
4614                 memset(&fp->old_uclient, 0,
4615                        sizeof(struct ustorm_per_client_stats));
4616                 memset(&fp->old_xclient, 0,
4617                        sizeof(struct xstorm_per_client_stats));
4618                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4619         }
4620
4621         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4622         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4623
4624         bp->stats_state = STATS_STATE_DISABLED;
4625
4626         if (bp->port.pmf) {
4627                 if (bp->port.port_stx)
4628                         bnx2x_port_stats_base_init(bp);
4629
4630                 if (bp->func_stx)
4631                         bnx2x_func_stats_base_init(bp);
4632
4633         } else if (bp->func_stx)
4634                 bnx2x_func_stats_base_update(bp);
4635 }
4636
4637 static void bnx2x_timer(unsigned long data)
4638 {
4639         struct bnx2x *bp = (struct bnx2x *) data;
4640
4641         if (!netif_running(bp->dev))
4642                 return;
4643
4644         if (atomic_read(&bp->intr_sem) != 0)
4645                 goto timer_restart;
4646
4647         if (poll) {
4648                 struct bnx2x_fastpath *fp = &bp->fp[0];
4649                 int rc;
4650
4651                 bnx2x_tx_int(fp);
4652                 rc = bnx2x_rx_int(fp, 1000);
4653         }
4654
4655         if (!BP_NOMCP(bp)) {
4656                 int func = BP_FUNC(bp);
4657                 u32 drv_pulse;
4658                 u32 mcp_pulse;
4659
4660                 ++bp->fw_drv_pulse_wr_seq;
4661                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4662                 /* TBD - add SYSTEM_TIME */
4663                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4664                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4665
4666                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4667                              MCP_PULSE_SEQ_MASK);
4668                 /* The delta between driver pulse and mcp response
4669                  * should be 1 (before mcp response) or 0 (after mcp response)
4670                  */
4671                 if ((drv_pulse != mcp_pulse) &&
4672                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4673                         /* someone lost a heartbeat... */
4674                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4675                                   drv_pulse, mcp_pulse);
4676                 }
4677         }
4678
4679         if (bp->state == BNX2X_STATE_OPEN)
4680                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4681
4682 timer_restart:
4683         mod_timer(&bp->timer, jiffies + bp->current_interval);
4684 }
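/*
 * The pulse exchange above is a driver<->MCP heartbeat: the driver bumps a
 * sequence number in shared memory on every timer tick and the management
 * firmware echoes it back.  Since both sides run freely, a delta of 0 or 1
 * (modulo the sequence mask) is normal; anything else means one side missed
 * a beat and is logged.
 */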
4685
4686 /* end of Statistics */
4687
4688 /* nic init */
4689
4690 /*
4691  * nic init service functions
4692  */
4693
4694 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4695 {
4696         int port = BP_PORT(bp);
4697
4698         /* "CSTORM" */
4699         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4700                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4701                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4702         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4703                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4704                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4705 }
4706
4707 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4708                           dma_addr_t mapping, int sb_id)
4709 {
4710         int port = BP_PORT(bp);
4711         int func = BP_FUNC(bp);
4712         int index;
4713         u64 section;
4714
4715         /* USTORM */
4716         section = ((u64)mapping) + offsetof(struct host_status_block,
4717                                             u_status_block);
4718         sb->u_status_block.status_block_id = sb_id;
4719
4720         REG_WR(bp, BAR_CSTRORM_INTMEM +
4721                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4722         REG_WR(bp, BAR_CSTRORM_INTMEM +
4723                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4724                U64_HI(section));
4725         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4726                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4727
4728         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4729                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4730                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4731
4732         /* CSTORM */
4733         section = ((u64)mapping) + offsetof(struct host_status_block,
4734                                             c_status_block);
4735         sb->c_status_block.status_block_id = sb_id;
4736
4737         REG_WR(bp, BAR_CSTRORM_INTMEM +
4738                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4739         REG_WR(bp, BAR_CSTRORM_INTMEM +
4740                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4741                U64_HI(section));
4742         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4743                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4744
4745         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4746                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4747                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4748
4749         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4750 }
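/*
 * In this driver both the USTORM and CSTORM halves of a status block are
 * hosted in CSTORM memory (hence BAR_CSTRORM_INTMEM for both sections).
 * Each index starts with host coalescing disabled (the REG_WR16 of 1) until
 * bnx2x_update_coalesce() programs real timeouts.
 */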
4751
4752 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4753 {
4754         int func = BP_FUNC(bp);
4755
4756         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4757                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4758                         sizeof(struct tstorm_def_status_block)/4);
4759         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4760                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4761                         sizeof(struct cstorm_def_status_block_u)/4);
4762         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4763                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4764                         sizeof(struct cstorm_def_status_block_c)/4);
4765         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4766                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4767                         sizeof(struct xstorm_def_status_block)/4);
4768 }
4769
4770 static void bnx2x_init_def_sb(struct bnx2x *bp,
4771                               struct host_def_status_block *def_sb,
4772                               dma_addr_t mapping, int sb_id)
4773 {
4774         int port = BP_PORT(bp);
4775         int func = BP_FUNC(bp);
4776         int index, val, reg_offset;
4777         u64 section;
4778
4779         /* ATTN */
4780         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4781                                             atten_status_block);
4782         def_sb->atten_status_block.status_block_id = sb_id;
4783
4784         bp->attn_state = 0;
4785
4786         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4787                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4788
4789         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4790                 bp->attn_group[index].sig[0] = REG_RD(bp,
4791                                                      reg_offset + 0x10*index);
4792                 bp->attn_group[index].sig[1] = REG_RD(bp,
4793                                                reg_offset + 0x4 + 0x10*index);
4794                 bp->attn_group[index].sig[2] = REG_RD(bp,
4795                                                reg_offset + 0x8 + 0x10*index);
4796                 bp->attn_group[index].sig[3] = REG_RD(bp,
4797                                                reg_offset + 0xc + 0x10*index);
4798         }
4799
4800         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4801                              HC_REG_ATTN_MSG0_ADDR_L);
4802
4803         REG_WR(bp, reg_offset, U64_LO(section));
4804         REG_WR(bp, reg_offset + 4, U64_HI(section));
4805
4806         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4807
4808         val = REG_RD(bp, reg_offset);
4809         val |= sb_id;
4810         REG_WR(bp, reg_offset, val);
4811
4812         /* USTORM */
4813         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4814                                             u_def_status_block);
4815         def_sb->u_def_status_block.status_block_id = sb_id;
4816
4817         REG_WR(bp, BAR_CSTRORM_INTMEM +
4818                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4819         REG_WR(bp, BAR_CSTRORM_INTMEM +
4820                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4821                U64_HI(section));
4822         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4823                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4824
4825         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4826                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4827                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4828
4829         /* CSTORM */
4830         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4831                                             c_def_status_block);
4832         def_sb->c_def_status_block.status_block_id = sb_id;
4833
4834         REG_WR(bp, BAR_CSTRORM_INTMEM +
4835                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4836         REG_WR(bp, BAR_CSTRORM_INTMEM +
4837                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4838                U64_HI(section));
4839         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4840                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4841
4842         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4843                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4844                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4845
4846         /* TSTORM */
4847         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4848                                             t_def_status_block);
4849         def_sb->t_def_status_block.status_block_id = sb_id;
4850
4851         REG_WR(bp, BAR_TSTRORM_INTMEM +
4852                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4853         REG_WR(bp, BAR_TSTRORM_INTMEM +
4854                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4855                U64_HI(section));
4856         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4857                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4858
4859         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4860                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4861                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4862
4863         /* XSTORM */
4864         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4865                                             x_def_status_block);
4866         def_sb->x_def_status_block.status_block_id = sb_id;
4867
4868         REG_WR(bp, BAR_XSTRORM_INTMEM +
4869                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4870         REG_WR(bp, BAR_XSTRORM_INTMEM +
4871                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4872                U64_HI(section));
4873         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4874                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4875
4876         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4877                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4878                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4879
4880         bp->stats_pending = 0;
4881         bp->set_mac_pending = 0;
4882
4883         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4884 }
4885
4886 static void bnx2x_update_coalesce(struct bnx2x *bp)
4887 {
4888         int port = BP_PORT(bp);
4889         int i;
4890
4891         for_each_queue(bp, i) {
4892                 int sb_id = bp->fp[i].sb_id;
4893
4894                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4895                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4896                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4897                                                       U_SB_ETH_RX_CQ_INDEX),
4898                         bp->rx_ticks/(4 * BNX2X_BTR));
4899                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4900                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4901                                                        U_SB_ETH_RX_CQ_INDEX),
4902                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4903
4904                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4905                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4906                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4907                                                       C_SB_ETH_TX_CQ_INDEX),
4908                         bp->tx_ticks/(4 * BNX2X_BTR));
4909                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4910                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4911                                                        C_SB_ETH_TX_CQ_INDEX),
4912                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4913         }
4914 }
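/*
 * The timeouts above are bp->rx_ticks/bp->tx_ticks (microseconds, settable
 * via ethtool -C) scaled down by 4*BNX2X_BTR to the storm timer resolution.
 * A computed timeout of 0 could never fire, so the paired HC_DISABLE write
 * turns that status-block index off entirely and re-enables it otherwise.
 */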
4915
4916 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4917                                        struct bnx2x_fastpath *fp, int last)
4918 {
4919         int i;
4920
4921         for (i = 0; i < last; i++) {
4922                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4923                 struct sk_buff *skb = rx_buf->skb;
4924
4925                 if (skb == NULL) {
4926                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4927                         continue;
4928                 }
4929
4930                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4931                         pci_unmap_single(bp->pdev,
4932                                          pci_unmap_addr(rx_buf, mapping),
4933                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4934
4935                 dev_kfree_skb(skb);
4936                 rx_buf->skb = NULL;
4937         }
4938 }
4939
4940 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4941 {
4942         int func = BP_FUNC(bp);
4943         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4944                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4945         u16 ring_prod, cqe_ring_prod;
4946         int i, j;
4947
4948         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4949         DP(NETIF_MSG_IFUP,
4950            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4951
4952         if (bp->flags & TPA_ENABLE_FLAG) {
4953
4954                 for_each_queue(bp, j) {
4955                         struct bnx2x_fastpath *fp = &bp->fp[j];
4956
4957                         for (i = 0; i < max_agg_queues; i++) {
4958                                 fp->tpa_pool[i].skb =
4959                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4960                                 if (!fp->tpa_pool[i].skb) {
4961                                         BNX2X_ERR("Failed to allocate TPA "
4962                                                   "skb pool for queue[%d] - "
4963                                                   "disabling TPA on this "
4964                                                   "queue!\n", j);
4965                                         bnx2x_free_tpa_pool(bp, fp, i);
4966                                         fp->disable_tpa = 1;
4967                                         break;
4968                                 }
4969                                 /* clear the mapping in this queue's pool entry */
4970                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4971                                                    mapping, 0);
4972                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4973                         }
4974                 }
4975         }
4976
4977         for_each_queue(bp, j) {
4978                 struct bnx2x_fastpath *fp = &bp->fp[j];
4979
4980                 fp->rx_bd_cons = 0;
4981                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4982                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4983
4984                 /* "next page" elements initialization */
4985                 /* SGE ring */
4986                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4987                         struct eth_rx_sge *sge;
4988
4989                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4990                         sge->addr_hi =
4991                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4992                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4993                         sge->addr_lo =
4994                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4995                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4996                 }
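                /*
                 * Ring pages are linked into a circle: the last slot(s) of
                 * every BCM_PAGE_SIZE page hold a pointer to the next page
                 * instead of a real descriptor, which is why the loops here
                 * fill RX_SGE_CNT*i - 2 / RX_DESC_CNT*i - 2 (two reserved
                 * slots) and RCQ_DESC_CNT*i - 1 (one next-page CQE).  The
                 * (i % NUM_*) arithmetic makes the final page point back at
                 * page 0, closing the ring.
                 */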
4997
4998                 bnx2x_init_sge_ring_bit_mask(fp);
4999
5000                 /* RX BD ring */
5001                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5002                         struct eth_rx_bd *rx_bd;
5003
5004                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5005                         rx_bd->addr_hi =
5006                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5007                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5008                         rx_bd->addr_lo =
5009                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5010                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5011                 }
5012
5013                 /* CQ ring */
5014                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5015                         struct eth_rx_cqe_next_page *nextpg;
5016
5017                         nextpg = (struct eth_rx_cqe_next_page *)
5018                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5019                         nextpg->addr_hi =
5020                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5021                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5022                         nextpg->addr_lo =
5023                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5024                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5025                 }
5026
5027                 /* Allocate SGEs and initialize the ring elements */
5028                 for (i = 0, ring_prod = 0;
5029                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5030
5031                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5032                                 BNX2X_ERR("was only able to allocate "
5033                                           "%d rx sges\n", i);
5034                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5035                                 /* Cleanup already allocated elements */
5036                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5037                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5038                                 fp->disable_tpa = 1;
5039                                 ring_prod = 0;
5040                                 break;
5041                         }
5042                         ring_prod = NEXT_SGE_IDX(ring_prod);
5043                 }
5044                 fp->rx_sge_prod = ring_prod;
5045
5046                 /* Allocate BDs and initialize BD ring */
5047                 fp->rx_comp_cons = 0;
5048                 cqe_ring_prod = ring_prod = 0;
5049                 for (i = 0; i < bp->rx_ring_size; i++) {
5050                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5051                                 BNX2X_ERR("was only able to allocate "
5052                                           "%d rx skbs on queue[%d]\n", i, j);
5053                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5054                                 break;
5055                         }
5056                         ring_prod = NEXT_RX_IDX(ring_prod);
5057                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5058                         WARN_ON(ring_prod <= i);
5059                 }
5060
5061                 fp->rx_bd_prod = ring_prod;
5062                 /* must not have more available CQEs than BDs */
5063                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5064                                        cqe_ring_prod);
5065                 fp->rx_pkt = fp->rx_calls = 0;
5066
5067                 /* Warning!
5068                  * This will generate an interrupt (to the TSTORM),
5069                  * so it must only be done after the chip is initialized.
5070                  */
5071                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5072                                      fp->rx_sge_prod);
5073                 if (j != 0)
5074                         continue;
5075
5076                 REG_WR(bp, BAR_USTRORM_INTMEM +
5077                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5078                        U64_LO(fp->rx_comp_mapping));
5079                 REG_WR(bp, BAR_USTRORM_INTMEM +
5080                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5081                        U64_HI(fp->rx_comp_mapping));
5082         }
5083 }
5084
5085 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5086 {
5087         int i, j;
5088
5089         for_each_queue(bp, j) {
5090                 struct bnx2x_fastpath *fp = &bp->fp[j];
5091
5092                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5093                         struct eth_tx_next_bd *tx_next_bd =
5094                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5095
5096                         tx_next_bd->addr_hi =
5097                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5098                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5099                         tx_next_bd->addr_lo =
5100                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5101                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5102                 }
5103
5104                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5105                 fp->tx_db.data.zero_fill1 = 0;
5106                 fp->tx_db.data.prod = 0;
5107
5108                 fp->tx_pkt_prod = 0;
5109                 fp->tx_pkt_cons = 0;
5110                 fp->tx_bd_prod = 0;
5111                 fp->tx_bd_cons = 0;
5112                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5113                 fp->tx_pkt = 0;
5114         }
5115 }
5116
5117 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5118 {
5119         int func = BP_FUNC(bp);
5120
5121         spin_lock_init(&bp->spq_lock);
5122
5123         bp->spq_left = MAX_SPQ_PENDING;
5124         bp->spq_prod_idx = 0;
5125         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5126         bp->spq_prod_bd = bp->spq;
5127         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5128
5129         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5130                U64_LO(bp->spq_mapping));
5131         REG_WR(bp,
5132                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5133                U64_HI(bp->spq_mapping));
5134
5135         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5136                bp->spq_prod_idx);
5137 }
5138
5139 static void bnx2x_init_context(struct bnx2x *bp)
5140 {
5141         int i;
5142
5143         /* Rx */
5144         for_each_queue(bp, i) {
5145                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5146                 struct bnx2x_fastpath *fp = &bp->fp[i];
5147                 u8 cl_id = fp->cl_id;
5148
5149                 context->ustorm_st_context.common.sb_index_numbers =
5150                                                 BNX2X_RX_SB_INDEX_NUM;
5151                 context->ustorm_st_context.common.clientId = cl_id;
5152                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5153                 context->ustorm_st_context.common.flags =
5154                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5155                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5156                 context->ustorm_st_context.common.statistics_counter_id =
5157                                                 cl_id;
5158                 context->ustorm_st_context.common.mc_alignment_log_size =
5159                                                 BNX2X_RX_ALIGN_SHIFT;
5160                 context->ustorm_st_context.common.bd_buff_size =
5161                                                 bp->rx_buf_size;
5162                 context->ustorm_st_context.common.bd_page_base_hi =
5163                                                 U64_HI(fp->rx_desc_mapping);
5164                 context->ustorm_st_context.common.bd_page_base_lo =
5165                                                 U64_LO(fp->rx_desc_mapping);
5166                 if (!fp->disable_tpa) {
5167                         context->ustorm_st_context.common.flags |=
5168                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5169                         context->ustorm_st_context.common.sge_buff_size =
5170                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5171                                          (u32)0xffff);
5172                         context->ustorm_st_context.common.sge_page_base_hi =
5173                                                 U64_HI(fp->rx_sge_mapping);
5174                         context->ustorm_st_context.common.sge_page_base_lo =
5175                                                 U64_LO(fp->rx_sge_mapping);
5176
5177                         context->ustorm_st_context.common.max_sges_for_packet =
5178                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5179                         context->ustorm_st_context.common.max_sges_for_packet =
5180                                 ((context->ustorm_st_context.common.
5181                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5182                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5183                 }
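                /*
                 * max_sges_for_packet is the MTU rounded up to whole SGE
                 * pages, then rounded up again to a multiple of PAGES_PER_SGE
                 * and converted to SGE entries.  With hypothetical numbers:
                 * an MTU needing 3 pages and PAGES_PER_SGE = 2 rounds up to
                 * 4 pages, i.e. 2 SGEs per aggregated packet.
                 */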
5184
5185                 context->ustorm_ag_context.cdu_usage =
5186                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5187                                                CDU_REGION_NUMBER_UCM_AG,
5188                                                ETH_CONNECTION_TYPE);
5189
5190                 context->xstorm_ag_context.cdu_reserved =
5191                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5192                                                CDU_REGION_NUMBER_XCM_AG,
5193                                                ETH_CONNECTION_TYPE);
5194         }
5195
5196         /* Tx */
5197         for_each_queue(bp, i) {
5198                 struct bnx2x_fastpath *fp = &bp->fp[i];
5199                 struct eth_context *context =
5200                         bnx2x_sp(bp, context[i].eth);
5201
5202                 context->cstorm_st_context.sb_index_number =
5203                                                 C_SB_ETH_TX_CQ_INDEX;
5204                 context->cstorm_st_context.status_block_id = fp->sb_id;
5205
5206                 context->xstorm_st_context.tx_bd_page_base_hi =
5207                                                 U64_HI(fp->tx_desc_mapping);
5208                 context->xstorm_st_context.tx_bd_page_base_lo =
5209                                                 U64_LO(fp->tx_desc_mapping);
5210                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5211                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5212         }
5213 }
5214
5215 static void bnx2x_init_ind_table(struct bnx2x *bp)
5216 {
5217         int func = BP_FUNC(bp);
5218         int i;
5219
5220         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5221                 return;
5222
5223         DP(NETIF_MSG_IFUP,
5224            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5225         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5226                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5227                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5228                         bp->fp->cl_id + (i % bp->num_queues));
5229 }
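/*
 * With RSS active the TSTORM hashes each flow into one of
 * TSTORM_INDIRECTION_TABLE_SIZE buckets; the loop above fills the table
 * round-robin over the client ids.  E.g. with num_queues = 4 and a leading
 * client id of 0 the entries read 0, 1, 2, 3, 0, 1, ... so flows spread
 * evenly across the queues.
 */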
5230
5231 static void bnx2x_set_client_config(struct bnx2x *bp)
5232 {
5233         struct tstorm_eth_client_config tstorm_client = {0};
5234         int port = BP_PORT(bp);
5235         int i;
5236
5237         tstorm_client.mtu = bp->dev->mtu;
5238         tstorm_client.config_flags =
5239                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5240                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5241 #ifdef BCM_VLAN
5242         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5243                 tstorm_client.config_flags |=
5244                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5245                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5246         }
5247 #endif
5248
5249         for_each_queue(bp, i) {
5250                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5251
5252                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5253                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5254                        ((u32 *)&tstorm_client)[0]);
5255                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5256                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5257                        ((u32 *)&tstorm_client)[1]);
5258         }
5259
5260         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5261            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5262 }
5263
5264 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5265 {
5266         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5267         int mode = bp->rx_mode;
5268         int mask = bp->rx_mode_cl_mask;
5269         int func = BP_FUNC(bp);
5270         int port = BP_PORT(bp);
5271         int i;
5272         /* All but management unicast packets should pass to the host as well */
5273         u32 llh_mask =
5274                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5275                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5276                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5277                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5278
5279         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5280
5281         switch (mode) {
5282         case BNX2X_RX_MODE_NONE: /* no Rx */
5283                 tstorm_mac_filter.ucast_drop_all = mask;
5284                 tstorm_mac_filter.mcast_drop_all = mask;
5285                 tstorm_mac_filter.bcast_drop_all = mask;
5286                 break;
5287
5288         case BNX2X_RX_MODE_NORMAL:
5289                 tstorm_mac_filter.bcast_accept_all = mask;
5290                 break;
5291
5292         case BNX2X_RX_MODE_ALLMULTI:
5293                 tstorm_mac_filter.mcast_accept_all = mask;
5294                 tstorm_mac_filter.bcast_accept_all = mask;
5295                 break;
5296
5297         case BNX2X_RX_MODE_PROMISC:
5298                 tstorm_mac_filter.ucast_accept_all = mask;
5299                 tstorm_mac_filter.mcast_accept_all = mask;
5300                 tstorm_mac_filter.bcast_accept_all = mask;
5301                 /* pass management unicast packets as well */
5302                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5303                 break;
5304
5305         default:
5306                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5307                 break;
5308         }
5309
5310         REG_WR(bp,
5311                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5312                llh_mask);
5313
5314         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5315                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5316                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5317                        ((u32 *)&tstorm_mac_filter)[i]);
5318
5319 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5320                    ((u32 *)&tstorm_mac_filter)[i]); */
5321         }
5322
5323         if (mode != BNX2X_RX_MODE_NONE)
5324                 bnx2x_set_client_config(bp);
5325 }
5326
5327 static void bnx2x_init_internal_common(struct bnx2x *bp)
5328 {
5329         int i;
5330
5331         /* Zero this manually as its initialization is
5332            currently missing in the initTool */
5333         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5334                 REG_WR(bp, BAR_USTRORM_INTMEM +
5335                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5336 }
5337
5338 static void bnx2x_init_internal_port(struct bnx2x *bp)
5339 {
5340         int port = BP_PORT(bp);
5341
5342         REG_WR(bp,
5343                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5344         REG_WR(bp,
5345                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5346         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5347         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5348 }
5349
5350 static void bnx2x_init_internal_func(struct bnx2x *bp)
5351 {
5352         struct tstorm_eth_function_common_config tstorm_config = {0};
5353         struct stats_indication_flags stats_flags = {0};
5354         int port = BP_PORT(bp);
5355         int func = BP_FUNC(bp);
5356         int i, j;
5357         u32 offset;
5358         u16 max_agg_size;
5359
5360         if (is_multi(bp)) {
5361                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5362                 tstorm_config.rss_result_mask = MULTI_MASK;
5363         }
5364
5365         /* Enable TPA if needed */
5366         if (bp->flags & TPA_ENABLE_FLAG)
5367                 tstorm_config.config_flags |=
5368                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5369
5370         if (IS_E1HMF(bp))
5371                 tstorm_config.config_flags |=
5372                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5373
5374         tstorm_config.leading_client_id = BP_L_ID(bp);
5375
5376         REG_WR(bp, BAR_TSTRORM_INTMEM +
5377                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5378                (*(u32 *)&tstorm_config));
5379
5380         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5381         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5382         bnx2x_set_storm_rx_mode(bp);
5383
5384         for_each_queue(bp, i) {
5385                 u8 cl_id = bp->fp[i].cl_id;
5386
5387                 /* reset xstorm per client statistics */
5388                 offset = BAR_XSTRORM_INTMEM +
5389                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5390                 for (j = 0;
5391                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5392                         REG_WR(bp, offset + j*4, 0);
5393
5394                 /* reset tstorm per client statistics */
5395                 offset = BAR_TSTRORM_INTMEM +
5396                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5397                 for (j = 0;
5398                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5399                         REG_WR(bp, offset + j*4, 0);
5400
5401                 /* reset ustorm per client statistics */
5402                 offset = BAR_USTRORM_INTMEM +
5403                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5404                 for (j = 0;
5405                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5406                         REG_WR(bp, offset + j*4, 0);
5407         }
5408
5409         /* Init statistics related context */
5410         stats_flags.collect_eth = 1;
5411
5412         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5413                ((u32 *)&stats_flags)[0]);
5414         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5415                ((u32 *)&stats_flags)[1]);
5416
5417         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5418                ((u32 *)&stats_flags)[0]);
5419         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5420                ((u32 *)&stats_flags)[1]);
5421
5422         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5423                ((u32 *)&stats_flags)[0]);
5424         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5425                ((u32 *)&stats_flags)[1]);
5426
5427         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5428                ((u32 *)&stats_flags)[0]);
5429         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5430                ((u32 *)&stats_flags)[1]);
5431
5432         REG_WR(bp, BAR_XSTRORM_INTMEM +
5433                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5434                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5435         REG_WR(bp, BAR_XSTRORM_INTMEM +
5436                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5437                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5438
5439         REG_WR(bp, BAR_TSTRORM_INTMEM +
5440                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5441                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5442         REG_WR(bp, BAR_TSTRORM_INTMEM +
5443                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5444                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5445
5446         REG_WR(bp, BAR_USTRORM_INTMEM +
5447                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5448                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5449         REG_WR(bp, BAR_USTRORM_INTMEM +
5450                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5451                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5452
5453         if (CHIP_IS_E1H(bp)) {
5454                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5455                         IS_E1HMF(bp));
5456                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5457                         IS_E1HMF(bp));
5458                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5459                         IS_E1HMF(bp));
5460                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5461                         IS_E1HMF(bp));
5462
5463                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5464                          bp->e1hov);
5465         }
5466
5467         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5468         max_agg_size =
5469                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5470                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5471                     (u32)0xffff);
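        /* e.g. with 4KB SGE pages and PAGES_PER_SGE == 2 (illustrative
         * values): min(8, MAX_SKB_FRAGS) * 4096 * 2 = 0x10000, which is
         * then clamped to the u16 limit 0xffff */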
5472         for_each_queue(bp, i) {
5473                 struct bnx2x_fastpath *fp = &bp->fp[i];
5474
5475                 REG_WR(bp, BAR_USTRORM_INTMEM +
5476                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5477                        U64_LO(fp->rx_comp_mapping));
5478                 REG_WR(bp, BAR_USTRORM_INTMEM +
5479                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5480                        U64_HI(fp->rx_comp_mapping));
5481
5482                 /* Next page */
5483                 REG_WR(bp, BAR_USTRORM_INTMEM +
5484                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5485                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5486                 REG_WR(bp, BAR_USTRORM_INTMEM +
5487                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5488                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5489
5490                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5491                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5492                          max_agg_size);
5493         }
5494
5495         /* dropless flow control */
5496         if (CHIP_IS_E1H(bp)) {
5497                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5498
5499                 rx_pause.bd_thr_low = 250;
5500                 rx_pause.cqe_thr_low = 250;
5501                 rx_pause.cos = 1;
5502                 rx_pause.sge_thr_low = 0;
5503                 rx_pause.bd_thr_high = 350;
5504                 rx_pause.cqe_thr_high = 350;
5505                 rx_pause.sge_thr_high = 0;
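                /* thresholds are presumably in ring entries: pause is
                 * asserted when free BDs/CQEs fall below *_thr_low and
                 * released once they climb back above *_thr_high */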
5506
5507                 for_each_queue(bp, i) {
5508                         struct bnx2x_fastpath *fp = &bp->fp[i];
5509
5510                         if (!fp->disable_tpa) {
5511                                 rx_pause.sge_thr_low = 150;
5512                                 rx_pause.sge_thr_high = 250;
5513                         }
5514
5516                         offset = BAR_USTRORM_INTMEM +
5517                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5518                                                                    fp->cl_id);
5519                         for (j = 0;
5520                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5521                              j++)
5522                                 REG_WR(bp, offset + j*4,
5523                                        ((u32 *)&rx_pause)[j]);
5524                 }
5525         }
5526
5527         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5528
5529         /* Init rate shaping and fairness contexts */
5530         if (IS_E1HMF(bp)) {
5531                 int vn;
5532
5533                 /* During init there is no active link.
5534                    Until link is up, set the link rate to 10Gbps */
5535                 bp->link_vars.line_speed = SPEED_10000;
5536                 bnx2x_init_port_minmax(bp);
5537
5538                 if (!BP_NOMCP(bp))
5539                         bp->mf_config =
5540                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5541                 bnx2x_calc_vn_weight_sum(bp);
5542
5543                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5544                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5545
5546                 /* Enable rate shaping and fairness */
5547                 bp->cmng.flags.cmng_enables |=
5548                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5549
5550         } else {
5551                 /* rate shaping and fairness are disabled */
5552                 DP(NETIF_MSG_IFUP,
5553                    "single function mode  minmax will be disabled\n");
5554         }
5555
5557         /* Store it to internal memory */
5558         if (bp->port.pmf)
5559                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5560                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5561                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5562                                ((u32 *)(&bp->cmng))[i]);
5563 }
5564
5565 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5566 {
5567         switch (load_code) {
5568         case FW_MSG_CODE_DRV_LOAD_COMMON:
5569                 bnx2x_init_internal_common(bp);
5570                 /* no break */
5571
5572         case FW_MSG_CODE_DRV_LOAD_PORT:
5573                 bnx2x_init_internal_port(bp);
5574                 /* no break */
5575
5576         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5577                 bnx2x_init_internal_func(bp);
5578                 break;
5579
5580         default:
5581                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5582                 break;
5583         }
5584 }
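/* Note: the switch above falls through on purpose, so a COMMON load
 * also performs the PORT and FUNCTION init stages. */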
5585
5586 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5587 {
5588         int i;
5589
5590         for_each_queue(bp, i) {
5591                 struct bnx2x_fastpath *fp = &bp->fp[i];
5592
5593                 fp->bp = bp;
5594                 fp->state = BNX2X_FP_STATE_CLOSED;
5595                 fp->index = i;
5596                 fp->cl_id = BP_L_ID(bp) + i;
5597 #ifdef BCM_CNIC
5598                 fp->sb_id = fp->cl_id + 1;
5599 #else
5600                 fp->sb_id = fp->cl_id;
5601 #endif
5602                 DP(NETIF_MSG_IFUP,
5603                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5604                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5605                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5606                               fp->sb_id);
5607                 bnx2x_update_fpsb_idx(fp);
5608         }
5609
5610         /* ensure status block indices were read */
5611         rmb();
5612
5614         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5615                           DEF_SB_ID);
5616         bnx2x_update_dsb_idx(bp);
5617         bnx2x_update_coalesce(bp);
5618         bnx2x_init_rx_rings(bp);
5619         bnx2x_init_tx_ring(bp);
5620         bnx2x_init_sp_ring(bp);
5621         bnx2x_init_context(bp);
5622         bnx2x_init_internal(bp, load_code);
5623         bnx2x_init_ind_table(bp);
5624         bnx2x_stats_init(bp);
5625
5626         /* At this point, we are ready for interrupts */
5627         atomic_set(&bp->intr_sem, 0);
5628
5629         /* flush all before enabling interrupts */
5630         mb();
5631         mmiowb();
5632
5633         bnx2x_int_enable(bp);
5634
5635         /* Check for SPIO5 */
5636         bnx2x_attn_int_deasserted0(bp,
5637                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5638                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5639 }
5640
5641 /* end of nic init */
5642
5643 /*
5644  * gzip service functions
5645  */
5646
5647 static int bnx2x_gunzip_init(struct bnx2x *bp)
5648 {
5649         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5650                                               &bp->gunzip_mapping);
5651         if (bp->gunzip_buf == NULL)
5652                 goto gunzip_nomem1;
5653
5654         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5655         if (bp->strm == NULL)
5656                 goto gunzip_nomem2;
5657
5658         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5659                                       GFP_KERNEL);
5660         if (bp->strm->workspace == NULL)
5661                 goto gunzip_nomem3;
5662
5663         return 0;
5664
5665 gunzip_nomem3:
5666         kfree(bp->strm);
5667         bp->strm = NULL;
5668
5669 gunzip_nomem2:
5670         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5671                             bp->gunzip_mapping);
5672         bp->gunzip_buf = NULL;
5673
5674 gunzip_nomem1:
5675         netdev_err(bp->dev, "Cannot allocate firmware buffer for decompression\n");
5676         return -ENOMEM;
5677 }
5678
5679 static void bnx2x_gunzip_end(struct bnx2x *bp)
5680 {
5681         kfree(bp->strm->workspace);
5682
5683         kfree(bp->strm);
5684         bp->strm = NULL;
5685
5686         if (bp->gunzip_buf) {
5687                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5688                                     bp->gunzip_mapping);
5689                 bp->gunzip_buf = NULL;
5690         }
5691 }
5692
5693 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5694 {
5695         int n, rc;
5696
5697         /* check gzip header */
5698         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5699                 BNX2X_ERR("Bad gzip header\n");
5700                 return -EINVAL;
5701         }
5702
5703         n = 10;
5704
5705 #define FNAME                           0x8
5706
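        /* Per RFC 1952 the fixed gzip header is 10 bytes (ID1 0x1f,
         * ID2 0x8b, CM, FLG, MTIME, XFL, OS); when FLG.FNAME (bit 3) is
         * set, a NUL-terminated file name follows and is skipped here */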
5707         if (zbuf[3] & FNAME)
5708                 while ((n < len) && (zbuf[n++] != 0));
5709
5710         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5711         bp->strm->avail_in = len - n;
5712         bp->strm->next_out = bp->gunzip_buf;
5713         bp->strm->avail_out = FW_BUF_SIZE;
5714
5715         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5716         if (rc != Z_OK)
5717                 return rc;
5718
5719         rc = zlib_inflate(bp->strm, Z_FINISH);
5720         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5721                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5722                            bp->strm->msg);
5723
5724         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5725         if (bp->gunzip_outlen & 0x3)
5726                 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5727                            bp->gunzip_outlen);
5728         bp->gunzip_outlen >>= 2;
5729
5730         zlib_inflateEnd(bp->strm);
5731
5732         if (rc == Z_STREAM_END)
5733                 return 0;
5734
5735         return rc;
5736 }
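/* Usage sketch (the firmware section variable names are illustrative):
 * after a successful bnx2x_gunzip_init(bp),
 *
 *      rc = bnx2x_gunzip(bp, section_data, section_len);
 *
 * leaves the inflated image in bp->gunzip_buf and its length, in 32-bit
 * words, in bp->gunzip_outlen.
 */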
5737
5738 /* nic load/unload */
5739
5740 /*
5741  * General service functions
5742  */
5743
5744 /* send a NIG loopback debug packet */
5745 static void bnx2x_lb_pckt(struct bnx2x *bp)
5746 {
5747         u32 wb_write[3];
5748
5749         /* Ethernet source and destination addresses */
5750         wb_write[0] = 0x55555555;
5751         wb_write[1] = 0x55555555;
5752         wb_write[2] = 0x20;             /* SOP */
5753         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5754
5755         /* NON-IP protocol */
5756         wb_write[0] = 0x09000000;
5757         wb_write[1] = 0x55555555;
5758         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5759         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5760 }
5761
5762 /* Some of the internal memories
5763  * are not directly readable from the driver.
5764  * To test them we send debug packets.
5765  */
5766 static int bnx2x_int_mem_test(struct bnx2x *bp)
5767 {
5768         int factor;
5769         int count, i;
5770         u32 val = 0;
5771
5772         if (CHIP_REV_IS_FPGA(bp))
5773                 factor = 120;
5774         else if (CHIP_REV_IS_EMUL(bp))
5775                 factor = 200;
5776         else
5777                 factor = 1;
5778
5779         DP(NETIF_MSG_HW, "start part1\n");
5780
5781         /* Disable inputs of parser neighbor blocks */
5782         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5783         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5784         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5785         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5786
5787         /*  Write 0 to parser credits for CFC search request */
5788         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5789
5790         /* send Ethernet packet */
5791         bnx2x_lb_pckt(bp);
5792
5793         /* TODO: should the NIG statistics be reset here? */
5794         /* Wait until NIG register shows 1 packet of size 0x10 */
5795         count = 1000 * factor;
5796         while (count) {
5797
5798                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5799                 val = *bnx2x_sp(bp, wb_data[0]);
5800                 if (val == 0x10)
5801                         break;
5802
5803                 msleep(10);
5804                 count--;
5805         }
5806         if (val != 0x10) {
5807                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5808                 return -1;
5809         }
5810
5811         /* Wait until PRS register shows 1 packet */
5812         count = 1000 * factor;
5813         while (count) {
5814                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5815                 if (val == 1)
5816                         break;
5817
5818                 msleep(10);
5819                 count--;
5820         }
5821         if (val != 0x1) {
5822                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5823                 return -2;
5824         }
5825
5826         /* Reset and init BRB, PRS */
5827         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5828         msleep(50);
5829         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5830         msleep(50);
5831         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5832         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5833
5834         DP(NETIF_MSG_HW, "part2\n");
5835
5836         /* Disable inputs of parser neighbor blocks */
5837         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5838         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5839         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5840         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5841
5842         /* Write 0 to parser credits for CFC search request */
5843         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5844
5845         /* send 10 Ethernet packets */
5846         for (i = 0; i < 10; i++)
5847                 bnx2x_lb_pckt(bp);
5848
5849         /* Wait until NIG register shows 10 + 1
5850            packets of total size 11*0x10 = 0xb0 */
5851         count = 1000 * factor;
5852         while (count) {
5853
5854                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5855                 val = *bnx2x_sp(bp, wb_data[0]);
5856                 if (val == 0xb0)
5857                         break;
5858
5859                 msleep(10);
5860                 count--;
5861         }
5862         if (val != 0xb0) {
5863                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5864                 return -3;
5865         }
5866
5867         /* Wait until PRS register shows 2 packets */
5868         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5869         if (val != 2)
5870                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5871
5872         /* Write 1 to parser credits for CFC search request */
5873         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5874
5875         /* Wait until PRS register shows 3 packets */
5876         msleep(10 * factor);
5878         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5879         if (val != 3)
5880                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5881
5882         /* clear NIG EOP FIFO */
5883         for (i = 0; i < 11; i++)
5884                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5885         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5886         if (val != 1) {
5887                 BNX2X_ERR("clear of NIG failed\n");
5888                 return -4;
5889         }
5890
5891         /* Reset and init BRB, PRS, NIG */
5892         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5893         msleep(50);
5894         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5895         msleep(50);
5896         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5897         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5898 #ifndef BCM_CNIC
5899         /* set NIC mode */
5900         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5901 #endif
5902
5903         /* Enable inputs of parser neighbor blocks */
5904         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5905         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5906         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5907         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5908
5909         DP(NETIF_MSG_HW, "done\n");
5910
5911         return 0; /* OK */
5912 }
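/* Return convention above: 0 on success; the negative codes -1..-4
 * identify which probe stage (first NIG wait, first PRS wait, the
 * 10-packet rerun, or the EOP FIFO drain) failed. */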
5913
5914 static void enable_blocks_attention(struct bnx2x *bp)
5915 {
5916         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5917         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5918         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5919         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5920         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5921         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5922         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5923         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5924         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5925 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5926 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5927         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5928         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5929         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5930 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5931 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5932         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5933         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5934         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5935         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5936 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5937 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5938         if (CHIP_REV_IS_FPGA(bp))
5939                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5940         else
5941                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5942         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5943         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5944         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5945 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5946 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5947         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5948         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5949 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5950         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5951 }
5952
5953
5954 static void bnx2x_reset_common(struct bnx2x *bp)
5955 {
5956         /* reset_common */
5957         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5958                0xd3ffff7f);
5959         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5960 }
5961
5962 static void bnx2x_init_pxp(struct bnx2x *bp)
5963 {
5964         u16 devctl;
5965         int r_order, w_order;
5966
5967         pci_read_config_word(bp->pdev,
5968                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5969         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5970         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5971         if (bp->mrrs == -1)
5972                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5973         else {
5974                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5975                 r_order = bp->mrrs;
5976         }
5977
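        /* r_order/w_order are PCIe-encoded exponents: the actual
         * request/payload size is 128 << order bytes
         * (0 -> 128B, ..., 5 -> 4KB) */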
5978         bnx2x_init_pxp_arb(bp, r_order, w_order);
5979 }
5980
5981 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5982 {
5983         u32 val;
5984         u8 port;
5985         u8 is_required = 0;
5986
5987         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5988               SHARED_HW_CFG_FAN_FAILURE_MASK;
5989
5990         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5991                 is_required = 1;
5992
5993         /*
5994          * The fan failure mechanism is usually related to the PHY type since
5995          * the power consumption of the board is affected by the PHY. Currently,
5996          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5997          */
5998         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5999                 for (port = PORT_0; port < PORT_MAX; port++) {
6000                         u32 phy_type =
6001                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6002                                          external_phy_config) &
6003                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6004                         is_required |=
6005                                 ((phy_type ==
6006                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6007                                  (phy_type ==
6008                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6009                                  (phy_type ==
6010                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6011                 }
6012
6013         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6014
6015         if (is_required == 0)
6016                 return;
6017
6018         /* Fan failure is indicated by SPIO 5 */
6019         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6020                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6021
6022         /* set to active low mode */
6023         val = REG_RD(bp, MISC_REG_SPIO_INT);
6024         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6025                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6026         REG_WR(bp, MISC_REG_SPIO_INT, val);
6027
6028         /* enable interrupt to signal the IGU */
6029         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6030         val |= (1 << MISC_REGISTERS_SPIO_5);
6031         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6032 }
6033
6034 static int bnx2x_init_common(struct bnx2x *bp)
6035 {
6036         u32 val, i;
6037 #ifdef BCM_CNIC
6038         u32 wb_write[2];
6039 #endif
6040
6041         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6042
6043         bnx2x_reset_common(bp);
6044         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6045         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6046
6047         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6048         if (CHIP_IS_E1H(bp))
6049                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6050
6051         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6052         msleep(30);
6053         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6054
6055         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6056         if (CHIP_IS_E1(bp)) {
6057                 /* enable HW interrupt from PXP on USDM overflow
6058                    bit 16 on INT_MASK_0 */
6059                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6060         }
6061
6062         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6063         bnx2x_init_pxp(bp);
6064
6065 #ifdef __BIG_ENDIAN
6066         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6067         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6068         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6069         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6070         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6071         /* make sure this value is 0 */
6072         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6073
6074 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6075         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6076         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6077         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6078         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6079 #endif
6080
6081         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6082 #ifdef BCM_CNIC
6083         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6084         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6085         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6086 #endif
6087
6088         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6089                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6090
6091         /* let the HW do its magic ... */
6092         msleep(100);
6093         /* finish PXP init */
6094         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6095         if (val != 1) {
6096                 BNX2X_ERR("PXP2 CFG failed\n");
6097                 return -EBUSY;
6098         }
6099         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6100         if (val != 1) {
6101                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6102                 return -EBUSY;
6103         }
6104
6105         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6106         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6107
6108         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6109
6110         /* clean the DMAE memory */
6111         bp->dmae_ready = 1;
6112         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6113
6114         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6115         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6116         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6117         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6118
6119         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6120         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6121         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6122         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6123
6124         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6125
6126 #ifdef BCM_CNIC
6127         wb_write[0] = 0;
6128         wb_write[1] = 0;
6129         for (i = 0; i < 64; i++) {
6130                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6131                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6132
6133                 if (CHIP_IS_E1H(bp)) {
6134                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6135                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6136                                           wb_write, 2);
6137                 }
6138         }
6139 #endif
6140         /* soft reset pulse */
6141         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6142         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6143
6144 #ifdef BCM_CNIC
6145         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6146 #endif
6147
6148         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6149         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6150         if (!CHIP_REV_IS_SLOW(bp)) {
6151                 /* enable hw interrupt from doorbell Q */
6152                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6153         }
6154
6155         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6156         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6157         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6158 #ifndef BCM_CNIC
6159         /* set NIC mode */
6160         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6161 #endif
6162         if (CHIP_IS_E1H(bp))
6163                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6164
6165         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6166         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6167         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6168         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6169
6170         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6171         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6172         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6173         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6174
6175         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6176         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6177         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6178         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6179
6180         /* sync semi rtc */
6181         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6182                0x80000000);
6183         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6184                0x80000000);
6185
6186         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6187         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6188         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6189
6190         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6191         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6192                 REG_WR(bp, i, 0xc0cac01a);
6193                 /* TODO: replace with something meaningful */
6194         }
6195         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6196 #ifdef BCM_CNIC
6197         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6198         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6199         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6200         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6201         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6202         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6203         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6204         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6205         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6206         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6207 #endif
6208         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6209
6210         if (sizeof(union cdu_context) != 1024)
6211                 /* we currently assume that a context is 1024 bytes */
6212                 pr_alert("please adjust the size of cdu_context(%ld)\n",
6213                          (long)sizeof(union cdu_context));
6214
6215         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6216         val = (4 << 24) + (0 << 12) + 1024;
6217         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6218
6219         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6220         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6221         /* enable context validation interrupt from CFC */
6222         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6223
6224         /* set the thresholds to prevent CFC/CDU race */
6225         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6226
6227         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6228         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6229
6230         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6231         /* Reset PCIE errors for debug */
6232         REG_WR(bp, 0x2814, 0xffffffff);
6233         REG_WR(bp, 0x3820, 0xffffffff);
6234
6235         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6236         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6237         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6238         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6239
6240         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6241         if (CHIP_IS_E1H(bp)) {
6242                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6243                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6244         }
6245
6246         if (CHIP_REV_IS_SLOW(bp))
6247                 msleep(200);
6248
6249         /* finish CFC init */
6250         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6251         if (val != 1) {
6252                 BNX2X_ERR("CFC LL_INIT failed\n");
6253                 return -EBUSY;
6254         }
6255         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6256         if (val != 1) {
6257                 BNX2X_ERR("CFC AC_INIT failed\n");
6258                 return -EBUSY;
6259         }
6260         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6261         if (val != 1) {
6262                 BNX2X_ERR("CFC CAM_INIT failed\n");
6263                 return -EBUSY;
6264         }
6265         REG_WR(bp, CFC_REG_DEBUG0, 0);
6266
6267         /* read NIG statistic
6268            to see if this is our first up since powerup */
6269         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6270         val = *bnx2x_sp(bp, wb_data[0]);
6271
6272         /* do internal memory self test */
6273         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6274                 BNX2X_ERR("internal mem self test failed\n");
6275                 return -EBUSY;
6276         }
6277
6278         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6279         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6280         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6281         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6282         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6283                 bp->port.need_hw_lock = 1;
6284                 break;
6285
6286         default:
6287                 break;
6288         }
6289
6290         bnx2x_setup_fan_failure_detection(bp);
6291
6292         /* clear PXP2 attentions */
6293         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6294
6295         enable_blocks_attention(bp);
6296
6297         if (!BP_NOMCP(bp)) {
6298                 bnx2x_acquire_phy_lock(bp);
6299                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6300                 bnx2x_release_phy_lock(bp);
6301         } else
6302                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6303
6304         return 0;
6305 }
6306
6307 static int bnx2x_init_port(struct bnx2x *bp)
6308 {
6309         int port = BP_PORT(bp);
6310         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6311         u32 low, high;
6312         u32 val;
6313
6314         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6315
6316         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6317
6318         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6319         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6320
6321         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6322         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6323         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6324         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6325
6326 #ifdef BCM_CNIC
6327         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6328
6329         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6330         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6331         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6332 #endif
6333         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6334
6335         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6336         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6337                 /* no pause for emulation and FPGA */
6338                 low = 0;
6339                 high = 513;
6340         } else {
6341                 if (IS_E1HMF(bp))
6342                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6343                 else if (bp->dev->mtu > 4096) {
6344                         if (bp->flags & ONE_PORT_FLAG)
6345                                 low = 160;
6346                         else {
6347                                 val = bp->dev->mtu;
6348                                 /* (24*1024 + val*4)/256 */
6349                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6350                         }
6351                 } else
6352                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6353                 high = low + 56;        /* 14*1024/256 */
6354         }
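        /* e.g. mtu = 9000 on a two-port non-MF setup (illustrative):
         * low = 96 + 140 + 1 = 237, high = 237 + 56 = 293,
         * both in 256-byte units */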
6355         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6356         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6357
6359         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6360
6361         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6362         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6363         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6364         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6365
6366         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6367         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6368         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6369         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6370
6371         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6372         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6373
6374         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6375
6376         /* configure PBF to work without PAUSE mtu 9000 */
6377         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6378
6379         /* update threshold */
6380         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6381         /* update init credit */
6382         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
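        /* i.e. 9040/16 = 565 lines of 16 bytes, so the initial credit
         * is 565 + 553 - 22 = 1096 (arithmetic spelled out for
         * clarity) */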
6383
6384         /* probe changes */
6385         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6386         msleep(5);
6387         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6388
6389 #ifdef BCM_CNIC
6390         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6391 #endif
6392         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6393         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6394
6395         if (CHIP_IS_E1(bp)) {
6396                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6397                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6398         }
6399         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6400
6401         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6402         /* init aeu_mask_attn_func_0/1:
6403          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6404          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6405          *             bits 4-7 are used for "per vn group attention" */
6406         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6407                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6408
6409         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6410         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6411         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6412         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6413         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6414
6415         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6416
6417         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6418
6419         if (CHIP_IS_E1H(bp)) {
6420                 /* 0x2 disable e1hov, 0x1 enable */
6421                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6422                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6423
6425                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6426                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6427                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6429         }
6430
6431         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6432         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6433
6434         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6435         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6436                 {
6437                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6438
6439                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6440                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6441
6442                 /* The GPIO should be swapped if the swap register is
6443                    set and active */
6444                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6445                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6446
6447                 /* Select function upon port-swap configuration */
6448                 if (port == 0) {
6449                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6450                         aeu_gpio_mask = (swap_val && swap_override) ?
6451                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6452                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6453                 } else {
6454                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6455                         aeu_gpio_mask = (swap_val && swap_override) ?
6456                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6457                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6458                 }
6459                 val = REG_RD(bp, offset);
6460                 /* add GPIO3 to group */
6461                 val |= aeu_gpio_mask;
6462                 REG_WR(bp, offset, val);
6463                 }
6464                 break;
6465
6466         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6467         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6468                 /* add SPIO 5 to group 0 */
6469                 {
6470                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6471                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6472                 val = REG_RD(bp, reg_addr);
6473                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6474                 REG_WR(bp, reg_addr, val);
6475                 }
6476                 break;
6477
6478         default:
6479                 break;
6480         }
6481
6482         bnx2x__link_reset(bp);
6483
6484         return 0;
6485 }
6486
6487 #define ILT_PER_FUNC            (768/2)
6488 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6489 /* The phys address is shifted right 12 bits and a 1=valid bit is
6490    added at the 53rd bit;
6491    then, since this is a wide register(TM),
6492    we split it into two 32-bit writes
6493  */
6494 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6495 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6496 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6497 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
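/* Worked example (illustrative): for addr = 0x123456000,
 *   ONCHIP_ADDR1(addr) = 0x00123456   (addr >> 12, low 32 bits)
 *   ONCHIP_ADDR2(addr) = 0x00100000   (valid bit 20 | addr >> 44)
 */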
6498
6499 #ifdef BCM_CNIC
6500 #define CNIC_ILT_LINES          127
6501 #define CNIC_CTX_PER_ILT        16
6502 #else
6503 #define CNIC_ILT_LINES          0
6504 #endif
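/* Each function thus owns 768/2 = 384 ILT lines; with BCM_CNIC, 127
 * extra CDU lines are reserved for CNIC contexts (16 contexts per
 * line, as bnx2x_init_func lays them out below). */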
6505
6506 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6507 {
6508         int reg;
6509
6510         if (CHIP_IS_E1H(bp))
6511                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6512         else /* E1 */
6513                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6514
6515         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6516 }
6517
6518 static int bnx2x_init_func(struct bnx2x *bp)
6519 {
6520         int port = BP_PORT(bp);
6521         int func = BP_FUNC(bp);
6522         u32 addr, val;
6523         int i;
6524
6525         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6526
6527         /* set MSI reconfigure capability */
6528         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6529         val = REG_RD(bp, addr);
6530         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6531         REG_WR(bp, addr, val);
6532
6533         i = FUNC_ILT_BASE(func);
6534
6535         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6536         if (CHIP_IS_E1H(bp)) {
6537                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6538                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6539         } else /* E1 */
6540                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6541                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6542
6543 #ifdef BCM_CNIC
6544         i += 1 + CNIC_ILT_LINES;
6545         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6546         if (CHIP_IS_E1(bp))
6547                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6548         else {
6549                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6550                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6551         }
6552
6553         i++;
6554         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6555         if (CHIP_IS_E1(bp))
6556                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6557         else {
6558                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6559                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6560         }
6561
6562         i++;
6563         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6564         if (CHIP_IS_E1(bp))
6565                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6566         else {
6567                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6568                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6569         }
6570
6571         /* tell the searcher where the T2 table is */
6572         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6573
6574         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6575                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6576
6577         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6578                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6579                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6580
6581         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6582 #endif
6583
6584         if (CHIP_IS_E1H(bp)) {
6585                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6586                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6587                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6588                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6589                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6590                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6591                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6592                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6593                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6594
6595                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6596                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6597         }
6598
6599         /* HC init per function */
6600         if (CHIP_IS_E1H(bp)) {
6601                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6602
6603                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6604                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6605         }
6606         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6607
6608         /* Reset PCIE errors for debug */
6609         REG_WR(bp, 0x2114, 0xffffffff);
6610         REG_WR(bp, 0x2120, 0xffffffff);
6611
6612         return 0;
6613 }
6614
6615 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6616 {
6617         int i, rc = 0;
6618
6619         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6620            BP_FUNC(bp), load_code);
6621
6622         bp->dmae_ready = 0;
6623         mutex_init(&bp->dmae_mutex);
6624         rc = bnx2x_gunzip_init(bp);
6625         if (rc)
6626                 return rc;
6627
6628         switch (load_code) {
6629         case FW_MSG_CODE_DRV_LOAD_COMMON:
6630                 rc = bnx2x_init_common(bp);
6631                 if (rc)
6632                         goto init_hw_err;
6633                 /* no break */
6634
6635         case FW_MSG_CODE_DRV_LOAD_PORT:
6636                 bp->dmae_ready = 1;
6637                 rc = bnx2x_init_port(bp);
6638                 if (rc)
6639                         goto init_hw_err;
6640                 /* no break */
6641
6642         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6643                 bp->dmae_ready = 1;
6644                 rc = bnx2x_init_func(bp);
6645                 if (rc)
6646                         goto init_hw_err;
6647                 break;
6648
6649         default:
6650                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6651                 break;
6652         }
6653
6654         if (!BP_NOMCP(bp)) {
6655                 int func = BP_FUNC(bp);
6656
6657                 bp->fw_drv_pulse_wr_seq =
6658                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6659                                  DRV_PULSE_SEQ_MASK);
6660                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6661         }
6662
6663         /* this needs to be done before gunzip end */
6664         bnx2x_zero_def_sb(bp);
6665         for_each_queue(bp, i)
6666                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6667 #ifdef BCM_CNIC
6668         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6669 #endif
6670
6671 init_hw_err:
6672         bnx2x_gunzip_end(bp);
6673
6674         return rc;
6675 }
6676
6677 static void bnx2x_free_mem(struct bnx2x *bp)
6678 {
6679
6680 #define BNX2X_PCI_FREE(x, y, size) \
6681         do { \
6682                 if (x) { \
6683                         pci_free_consistent(bp->pdev, size, x, y); \
6684                         x = NULL; \
6685                         y = 0; \
6686                 } \
6687         } while (0)
6688
6689 #define BNX2X_FREE(x) \
6690         do { \
6691                 if (x) { \
6692                         vfree(x); \
6693                         x = NULL; \
6694                 } \
6695         } while (0)
6696
6697         int i;
6698
6699         /* fastpath */
6700         /* Common */
6701         for_each_queue(bp, i) {
6702
6703                 /* status blocks */
6704                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6705                                bnx2x_fp(bp, i, status_blk_mapping),
6706                                sizeof(struct host_status_block));
6707         }
6708         /* Rx */
6709         for_each_queue(bp, i) {
6710
6711                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6712                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6713                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6714                                bnx2x_fp(bp, i, rx_desc_mapping),
6715                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6716
6717                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6718                                bnx2x_fp(bp, i, rx_comp_mapping),
6719                                sizeof(struct eth_fast_path_rx_cqe) *
6720                                NUM_RCQ_BD);
6721
6722                 /* SGE ring */
6723                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6724                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6725                                bnx2x_fp(bp, i, rx_sge_mapping),
6726                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6727         }
6728         /* Tx */
6729         for_each_queue(bp, i) {
6730
6731                 /* fastpath tx rings: tx_buf tx_desc */
6732                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6733                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6734                                bnx2x_fp(bp, i, tx_desc_mapping),
6735                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6736         }
6737         /* end of fastpath */
6738
6739         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6740                        sizeof(struct host_def_status_block));
6741
6742         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6743                        sizeof(struct bnx2x_slowpath));
6744
6745 #ifdef BCM_CNIC
6746         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6747         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6748         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6749         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6750         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6751                        sizeof(struct host_status_block));
6752 #endif
6753         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6754
6755 #undef BNX2X_PCI_FREE
6756 #undef BNX2X_FREE
6757 }
6758
6759 static int bnx2x_alloc_mem(struct bnx2x *bp)
6760 {
6761
6762 #define BNX2X_PCI_ALLOC(x, y, size) \
6763         do { \
6764                 x = pci_alloc_consistent(bp->pdev, size, y); \
6765                 if (x == NULL) \
6766                         goto alloc_mem_err; \
6767                 memset(x, 0, size); \
6768         } while (0)
6769
6770 #define BNX2X_ALLOC(x, size) \
6771         do { \
6772                 x = vmalloc(size); \
6773                 if (x == NULL) \
6774                         goto alloc_mem_err; \
6775                 memset(x, 0, size); \
6776         } while (0)
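     /* Both allocation macros zero the new buffer and jump to alloc_mem_err
      * on failure; the NULL checks in bnx2x_free_mem() make that error path
      * safe to run on a partially allocated state.
      */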
6777
6778         int i;
6779
6780         /* fastpath */
6781         /* Common */
6782         for_each_queue(bp, i) {
6783                 bnx2x_fp(bp, i, bp) = bp;
6784
6785                 /* status blocks */
6786                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6787                                 &bnx2x_fp(bp, i, status_blk_mapping),
6788                                 sizeof(struct host_status_block));
6789         }
6790         /* Rx */
6791         for_each_queue(bp, i) {
6792
6793                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6794                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6795                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6796                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6797                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6798                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6799
6800                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6801                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6802                                 sizeof(struct eth_fast_path_rx_cqe) *
6803                                 NUM_RCQ_BD);
6804
6805                 /* SGE ring */
6806                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6807                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6808                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6809                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6810                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6811         }
6812         /* Tx */
6813         for_each_queue(bp, i) {
6814
6815                 /* fastpath tx rings: tx_buf tx_desc */
6816                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6817                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6818                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6819                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6820                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6821         }
6822         /* end of fastpath */
6823
6824         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6825                         sizeof(struct host_def_status_block));
6826
6827         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6828                         sizeof(struct bnx2x_slowpath));
6829
6830 #ifdef BCM_CNIC
6831         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6832
6833         /* allocate searcher T2 table
6834          * we allocate 1/4 of alloc num for T2
6835          * (which is not entered into the ILT) */
6836         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6837
6838         /* Initialize T2 (for 1024 connections) */
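             /* each 64-byte T2 entry ends (at offset 56) with the DMA
              * address of the next entry, so the loop below links the
              * whole table into a chain for the searcher
              */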
6839         for (i = 0; i < 16*1024; i += 64)
6840                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6841
6842         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6843         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6844
6845         /* QM queues (128*MAX_CONN) */
6846         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6847
6848         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6849                         sizeof(struct host_status_block));
6850 #endif
6851
6852         /* Slow path ring */
6853         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6854
6855         return 0;
6856
6857 alloc_mem_err:
6858         bnx2x_free_mem(bp);
6859         return -ENOMEM;
6860
6861 #undef BNX2X_PCI_ALLOC
6862 #undef BNX2X_ALLOC
6863 }
6864
6865 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6866 {
6867         int i;
6868
6869         for_each_queue(bp, i) {
6870                 struct bnx2x_fastpath *fp = &bp->fp[i];
6871
6872                 u16 bd_cons = fp->tx_bd_cons;
6873                 u16 sw_prod = fp->tx_pkt_prod;
6874                 u16 sw_cons = fp->tx_pkt_cons;
6875
6876                 while (sw_cons != sw_prod) {
6877                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6878                         sw_cons++;
6879                 }
6880         }
6881 }
6882
6883 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6884 {
6885         int i, j;
6886
6887         for_each_queue(bp, j) {
6888                 struct bnx2x_fastpath *fp = &bp->fp[j];
6889
6890                 for (i = 0; i < NUM_RX_BD; i++) {
6891                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6892                         struct sk_buff *skb = rx_buf->skb;
6893
6894                         if (skb == NULL)
6895                                 continue;
6896
6897                         pci_unmap_single(bp->pdev,
6898                                          pci_unmap_addr(rx_buf, mapping),
6899                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6900
6901                         rx_buf->skb = NULL;
6902                         dev_kfree_skb(skb);
6903                 }
6904                 if (!fp->disable_tpa)
6905                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6906                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6907                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6908         }
6909 }
6910
6911 static void bnx2x_free_skbs(struct bnx2x *bp)
6912 {
6913         bnx2x_free_tx_skbs(bp);
6914         bnx2x_free_rx_skbs(bp);
6915 }
6916
6917 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6918 {
6919         int i, offset = 1;
6920
6921         free_irq(bp->msix_table[0].vector, bp->dev);
6922         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6923            bp->msix_table[0].vector);
6924
6925 #ifdef BCM_CNIC
6926         offset++;
6927 #endif
6928         for_each_queue(bp, i) {
6929                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6930                    "state %x\n", i, bp->msix_table[i + offset].vector,
6931                    bnx2x_fp(bp, i, state));
6932
6933                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6934         }
6935 }
6936
6937 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6938 {
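             /* with disable_only set, only the PCI MSI/MSI-X capability is
              * torn down and free_irq() is skipped - used on error paths
              * where the vectors were never requested
              */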
6939         if (bp->flags & USING_MSIX_FLAG) {
6940                 if (!disable_only)
6941                         bnx2x_free_msix_irqs(bp);
6942                 pci_disable_msix(bp->pdev);
6943                 bp->flags &= ~USING_MSIX_FLAG;
6944
6945         } else if (bp->flags & USING_MSI_FLAG) {
6946                 if (!disable_only)
6947                         free_irq(bp->pdev->irq, bp->dev);
6948                 pci_disable_msi(bp->pdev);
6949                 bp->flags &= ~USING_MSI_FLAG;
6950
6951         } else if (!disable_only)
6952                 free_irq(bp->pdev->irq, bp->dev);
6953 }
6954
6955 static int bnx2x_enable_msix(struct bnx2x *bp)
6956 {
6957         int i, rc, offset = 1;
6958         int igu_vec = 0;
6959
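             /* MSI-X table layout: entry 0 carries the slowpath (default
              * status block) vector, entry 1 is reserved for CNIC when
              * BCM_CNIC is set, and the remaining entries map one vector
              * per fastpath queue
              */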
6960         bp->msix_table[0].entry = igu_vec;
6961         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6962
6963 #ifdef BCM_CNIC
6964         igu_vec = BP_L_ID(bp) + offset;
6965         bp->msix_table[1].entry = igu_vec;
6966         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6967         offset++;
6968 #endif
6969         for_each_queue(bp, i) {
6970                 igu_vec = BP_L_ID(bp) + offset + i;
6971                 bp->msix_table[i + offset].entry = igu_vec;
6972                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6973                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6974         }
6975
6976         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6977                              BNX2X_NUM_QUEUES(bp) + offset);
6978         if (rc) {
6979                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6980                 return rc;
6981         }
6982
6983         bp->flags |= USING_MSIX_FLAG;
6984
6985         return 0;
6986 }
6987
6988 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6989 {
6990         int i, rc, offset = 1;
6991
6992         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6993                          bp->dev->name, bp->dev);
6994         if (rc) {
6995                 BNX2X_ERR("request sp irq failed\n");
6996                 return -EBUSY;
6997         }
6998
6999 #ifdef BCM_CNIC
7000         offset++;
7001 #endif
7002         for_each_queue(bp, i) {
7003                 struct bnx2x_fastpath *fp = &bp->fp[i];
7004                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7005                          bp->dev->name, i);
7006
7007                 rc = request_irq(bp->msix_table[i + offset].vector,
7008                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7009                 if (rc) {
7010                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7011                         bnx2x_free_msix_irqs(bp);
7012                         return -EBUSY;
7013                 }
7014
7015                 fp->state = BNX2X_FP_STATE_IRQ;
7016         }
7017
7018         i = BNX2X_NUM_QUEUES(bp);
7019         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
7020                     bp->msix_table[0].vector,
7021                     0, bp->msix_table[offset].vector,
7022                     i - 1, bp->msix_table[offset + i - 1].vector);
7023
7024         return 0;
7025 }
7026
7027 static int bnx2x_enable_msi(struct bnx2x *bp)
7028 {
7029         int rc;
7030
7031         rc = pci_enable_msi(bp->pdev);
7032         if (rc) {
7033                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7034                 return -1;
7035         }
7036         bp->flags |= USING_MSI_FLAG;
7037
7038         return 0;
7039 }
7040
7041 static int bnx2x_req_irq(struct bnx2x *bp)
7042 {
7043         unsigned long flags;
7044         int rc;
7045
7046         if (bp->flags & USING_MSI_FLAG)
7047                 flags = 0;
7048         else
7049                 flags = IRQF_SHARED;
7050
7051         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7052                          bp->dev->name, bp->dev);
7053         if (!rc)
7054                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7055
7056         return rc;
7057 }
7058
7059 static void bnx2x_napi_enable(struct bnx2x *bp)
7060 {
7061         int i;
7062
7063         for_each_queue(bp, i)
7064                 napi_enable(&bnx2x_fp(bp, i, napi));
7065 }
7066
7067 static void bnx2x_napi_disable(struct bnx2x *bp)
7068 {
7069         int i;
7070
7071         for_each_queue(bp, i)
7072                 napi_disable(&bnx2x_fp(bp, i, napi));
7073 }
7074
7075 static void bnx2x_netif_start(struct bnx2x *bp)
7076 {
7077         int intr_sem;
7078
7079         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7080         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7081
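             /* intr_sem drops to zero only when this call balances the last
              * outstanding interrupt disable, so the data path is re-enabled
              * exactly once
              */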
7082         if (intr_sem) {
7083                 if (netif_running(bp->dev)) {
7084                         bnx2x_napi_enable(bp);
7085                         bnx2x_int_enable(bp);
7086                         if (bp->state == BNX2X_STATE_OPEN)
7087                                 netif_tx_wake_all_queues(bp->dev);
7088                 }
7089         }
7090 }
7091
7092 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7093 {
7094         bnx2x_int_disable_sync(bp, disable_hw);
7095         bnx2x_napi_disable(bp);
7096         netif_tx_disable(bp->dev);
7097 }
7098
7099 /*
7100  * Init service functions
7101  */
7102
7103 /**
7104  * Sets a MAC address in the CAM for a set of L2 clients (E1 chip)
7105  *
7106  * @param bp driver descriptor
7107  * @param set set or clear an entry (1 or 0)
7108  * @param mac pointer to a buffer containing a MAC
7109  * @param cl_bit_vec bit vector of clients to register a MAC for
7110  * @param cam_offset offset in a CAM to use
7111  * @param with_bcast set broadcast MAC as well
7112  */
7113 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7114                                       u32 cl_bit_vec, u8 cam_offset,
7115                                       u8 with_bcast)
7116 {
7117         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7118         int port = BP_PORT(bp);
7119
7120         /* CAM allocation
7121          * unicasts 0-31:port0 32-63:port1
7122          * multicast 64-127:port0 128-191:port1
7123          */
7124         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7125         config->hdr.offset = cam_offset;
7126         config->hdr.client_id = 0xff;
7127         config->hdr.reserved1 = 0;
7128
7129         /* primary MAC */
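             /* swab16() converts each byte pair of the MAC into the
              * ordering the 16-bit CAM entry fields expect
              */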
7130         config->config_table[0].cam_entry.msb_mac_addr =
7131                                         swab16(*(u16 *)&mac[0]);
7132         config->config_table[0].cam_entry.middle_mac_addr =
7133                                         swab16(*(u16 *)&mac[2]);
7134         config->config_table[0].cam_entry.lsb_mac_addr =
7135                                         swab16(*(u16 *)&mac[4]);
7136         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7137         if (set)
7138                 config->config_table[0].target_table_entry.flags = 0;
7139         else
7140                 CAM_INVALIDATE(config->config_table[0]);
7141         config->config_table[0].target_table_entry.clients_bit_vector =
7142                                                 cpu_to_le32(cl_bit_vec);
7143         config->config_table[0].target_table_entry.vlan_id = 0;
7144
7145         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7146            (set ? "setting" : "clearing"),
7147            config->config_table[0].cam_entry.msb_mac_addr,
7148            config->config_table[0].cam_entry.middle_mac_addr,
7149            config->config_table[0].cam_entry.lsb_mac_addr);
7150
7151         /* broadcast */
7152         if (with_bcast) {
7153                 config->config_table[1].cam_entry.msb_mac_addr =
7154                         cpu_to_le16(0xffff);
7155                 config->config_table[1].cam_entry.middle_mac_addr =
7156                         cpu_to_le16(0xffff);
7157                 config->config_table[1].cam_entry.lsb_mac_addr =
7158                         cpu_to_le16(0xffff);
7159                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7160                 if (set)
7161                         config->config_table[1].target_table_entry.flags =
7162                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7163                 else
7164                         CAM_INVALIDATE(config->config_table[1]);
7165                 config->config_table[1].target_table_entry.clients_bit_vector =
7166                                                         cpu_to_le32(cl_bit_vec);
7167                 config->config_table[1].target_table_entry.vlan_id = 0;
7168         }
7169
7170         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7171                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7172                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7173 }
7174
7175 /**
7176  * Sets a MAC address in the CAM for a set of L2 clients (E1H chip)
7177  *
7178  * @param bp driver descriptor
7179  * @param set set or clear an entry (1 or 0)
7180  * @param mac pointer to a buffer containing a MAC
7181  * @param cl_bit_vec bit vector of clients to register a MAC for
7182  * @param cam_offset offset in a CAM to use
7183  */
7184 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7185                                        u32 cl_bit_vec, u8 cam_offset)
7186 {
7187         struct mac_configuration_cmd_e1h *config =
7188                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7189
7190         config->hdr.length = 1;
7191         config->hdr.offset = cam_offset;
7192         config->hdr.client_id = 0xff;
7193         config->hdr.reserved1 = 0;
7194
7195         /* primary MAC */
7196         config->config_table[0].msb_mac_addr =
7197                                         swab16(*(u16 *)&mac[0]);
7198         config->config_table[0].middle_mac_addr =
7199                                         swab16(*(u16 *)&mac[2]);
7200         config->config_table[0].lsb_mac_addr =
7201                                         swab16(*(u16 *)&mac[4]);
7202         config->config_table[0].clients_bit_vector =
7203                                         cpu_to_le32(cl_bit_vec);
7204         config->config_table[0].vlan_id = 0;
7205         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
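             /* when setting, the flags field carries the port number; when
              * clearing, the E1H action-type flag marks the entry for
              * invalidation
              */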
7206         if (set)
7207                 config->config_table[0].flags = BP_PORT(bp);
7208         else
7209                 config->config_table[0].flags =
7210                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7211
7212         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7213            (set ? "setting" : "clearing"),
7214            config->config_table[0].msb_mac_addr,
7215            config->config_table[0].middle_mac_addr,
7216            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7217
7218         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7219                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7220                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7221 }
7222
7223 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7224                              int *state_p, int poll)
7225 {
7226         /* can take a while if any port is running */
7227         int cnt = 5000;
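             /* with msleep(1) per iteration this bounds the wait at
              * roughly five seconds
              */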
7228
7229         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7230            poll ? "polling" : "waiting", state, idx);
7231
7232         might_sleep();
7233         while (cnt--) {
7234                 if (poll) {
7235                         bnx2x_rx_int(bp->fp, 10);
7236                         /* if index is different from 0
7237                          * the reply for some commands will
7238                          * be on the non default queue
7239                          */
7240                         if (idx)
7241                                 bnx2x_rx_int(&bp->fp[idx], 10);
7242                 }
7243
7244                 mb(); /* state is changed by bnx2x_sp_event() */
7245                 if (*state_p == state) {
7246 #ifdef BNX2X_STOP_ON_ERROR
7247                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7248 #endif
7249                         return 0;
7250                 }
7251
7252                 msleep(1);
7253
7254                 if (bp->panic)
7255                         return -EIO;
7256         }
7257
7258         /* timeout! */
7259         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7260                   poll ? "polling" : "waiting", state, idx);
7261 #ifdef BNX2X_STOP_ON_ERROR
7262         bnx2x_panic();
7263 #endif
7264
7265         return -EBUSY;
7266 }
7267
7268 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7269 {
7270         bp->set_mac_pending++;
7271         smp_wmb();
7272
7273         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7274                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7275
7276         /* Wait for a completion */
7277         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7278 }
7279
7280 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7281 {
7282         bp->set_mac_pending++;
7283         smp_wmb();
7284
7285         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7286                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7287                                   1);
7288
7289         /* Wait for a completion */
7290         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7291 }
7292
7293 #ifdef BCM_CNIC
7294 /**
7295  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7296  * MAC(s). This function will wait until the ramrod completion
7297  * returns.
7298  *
7299  * @param bp driver handle
7300  * @param set set or clear the CAM entry
7301  *
7302  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7303  */
7304 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7305 {
7306         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7307
7308         bp->set_mac_pending++;
7309         smp_wmb();
7310
7311         /* Send a SET_MAC ramrod */
7312         if (CHIP_IS_E1(bp))
7313                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7314                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7315                                   1);
7316         else
7317                 /* CAM allocation for E1H
7318                  * unicasts: by func number
7319                  * multicast: 20+FUNC*20, 20 each
7320                  */
7321                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7322                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7323
7324         /* Wait for a completion when setting */
7325         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7326
7327         return 0;
7328 }
7329 #endif
7330
7331 static int bnx2x_setup_leading(struct bnx2x *bp)
7332 {
7333         int rc;
7334
7335         /* reset IGU state */
7336         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7337
7338         /* SETUP ramrod */
7339         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7340
7341         /* Wait for completion */
7342         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7343
7344         return rc;
7345 }
7346
7347 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7348 {
7349         struct bnx2x_fastpath *fp = &bp->fp[index];
7350
7351         /* reset IGU state */
7352         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7353
7354         /* SETUP ramrod */
7355         fp->state = BNX2X_FP_STATE_OPENING;
7356         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7357                       fp->cl_id, 0);
7358
7359         /* Wait for completion */
7360         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7361                                  &(fp->state), 0);
7362 }
7363
7364 static int bnx2x_poll(struct napi_struct *napi, int budget);
7365
7366 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7367 {
7368
7369         switch (bp->multi_mode) {
7370         case ETH_RSS_MODE_DISABLED:
7371                 bp->num_queues = 1;
7372                 break;
7373
7374         case ETH_RSS_MODE_REGULAR:
7375                 if (num_queues)
7376                         bp->num_queues = min_t(u32, num_queues,
7377                                                   BNX2X_MAX_QUEUES(bp));
7378                 else
7379                         bp->num_queues = min_t(u32, num_online_cpus(),
7380                                                   BNX2X_MAX_QUEUES(bp));
7381                 break;
7382
7383
7384         default:
7385                 bp->num_queues = 1;
7386                 break;
7387         }
7388 }
7389
7390 static int bnx2x_set_num_queues(struct bnx2x *bp)
7391 {
7392         int rc = 0;
7393
7394         switch (int_mode) {
7395         case INT_MODE_INTx:
7396         case INT_MODE_MSI:
7397                 bp->num_queues = 1;
7398                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7399                 break;
7400
7401         case INT_MODE_MSIX:
7402         default:
7403                 /* Set number of queues according to bp->multi_mode value */
7404                 bnx2x_set_num_queues_msix(bp);
7405
7406                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7407                    bp->num_queues);
7408
7409                 /* if we can't use MSI-X we only need one fp,
7410                  * so try to enable MSI-X with the requested number of fp's
7411                  * and fall back to MSI or legacy INTx with one fp
7412                  */
7413                 rc = bnx2x_enable_msix(bp);
7414                 if (rc)
7415                         /* failed to enable MSI-X */
7416                         bp->num_queues = 1;
7417                 break;
7418         }
7419         bp->dev->real_num_tx_queues = bp->num_queues;
7420         return rc;
7421 }
7422
7423 #ifdef BCM_CNIC
7424 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7425 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7426 #endif
7427
7428 /* must be called with rtnl_lock */
7429 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7430 {
7431         u32 load_code;
7432         int i, rc;
7433
7434 #ifdef BNX2X_STOP_ON_ERROR
7435         if (unlikely(bp->panic))
7436                 return -EPERM;
7437 #endif
7438
7439         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7440
7441         rc = bnx2x_set_num_queues(bp);
7442
7443         if (bnx2x_alloc_mem(bp)) {
7444                 bnx2x_free_irq(bp, true);
7445                 return -ENOMEM;
7446         }
7447
7448         for_each_queue(bp, i)
7449                 bnx2x_fp(bp, i, disable_tpa) =
7450                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7451
7452         for_each_queue(bp, i)
7453                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7454                                bnx2x_poll, 128);
7455
7456         bnx2x_napi_enable(bp);
7457
7458         if (bp->flags & USING_MSIX_FLAG) {
7459                 rc = bnx2x_req_msix_irqs(bp);
7460                 if (rc) {
7461                         bnx2x_free_irq(bp, true);
7462                         goto load_error1;
7463                 }
7464         } else {
7465                 /* Fall back to INTx if MSI-X could not be enabled due to
7466                    lack of memory (in bnx2x_set_num_queues()) */
7467                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7468                         bnx2x_enable_msi(bp);
7469                 bnx2x_ack_int(bp);
7470                 rc = bnx2x_req_irq(bp);
7471                 if (rc) {
7472                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7473                         bnx2x_free_irq(bp, true);
7474                         goto load_error1;
7475                 }
7476                 if (bp->flags & USING_MSI_FLAG) {
7477                         bp->dev->irq = bp->pdev->irq;
7478                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7479                                     bp->pdev->irq);
7480                 }
7481         }
7482
7483         /* Send LOAD_REQUEST command to MCP.
7484            The reply indicates the type of LOAD command: if this is the
7485            first port to be initialized, the common blocks should be
7486            initialized as well; otherwise they should not.
7487         */
7488         if (!BP_NOMCP(bp)) {
7489                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7490                 if (!load_code) {
7491                         BNX2X_ERR("MCP response failure, aborting\n");
7492                         rc = -EBUSY;
7493                         goto load_error2;
7494                 }
7495                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7496                         rc = -EBUSY; /* other port in diagnostic mode */
7497                         goto load_error2;
7498                 }
7499
7500         } else {
7501                 int port = BP_PORT(bp);
7502
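                     /* without an MCP the driver tracks loads itself:
                      * load_count[0] counts all functions and
                      * load_count[1 + port] counts per port; the first load
                      * overall does COMMON init, the first on a port does
                      * PORT init and any other does FUNCTION init only
                      */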
7503                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7504                    load_count[0], load_count[1], load_count[2]);
7505                 load_count[0]++;
7506                 load_count[1 + port]++;
7507                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7508                    load_count[0], load_count[1], load_count[2]);
7509                 if (load_count[0] == 1)
7510                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7511                 else if (load_count[1 + port] == 1)
7512                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7513                 else
7514                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7515         }
7516
7517         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7518             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7519                 bp->port.pmf = 1;
7520         else
7521                 bp->port.pmf = 0;
7522         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7523
7524         /* Initialize HW */
7525         rc = bnx2x_init_hw(bp, load_code);
7526         if (rc) {
7527                 BNX2X_ERR("HW init failed, aborting\n");
7528                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7529                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7530                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7531                 goto load_error2;
7532         }
7533
7534         /* Setup NIC internals and enable interrupts */
7535         bnx2x_nic_init(bp, load_code);
7536
7537         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7538             (bp->common.shmem2_base))
7539                 SHMEM2_WR(bp, dcc_support,
7540                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7541                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7542
7543         /* Send LOAD_DONE command to MCP */
7544         if (!BP_NOMCP(bp)) {
7545                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7546                 if (!load_code) {
7547                         BNX2X_ERR("MCP response failure, aborting\n");
7548                         rc = -EBUSY;
7549                         goto load_error3;
7550                 }
7551         }
7552
7553         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7554
7555         rc = bnx2x_setup_leading(bp);
7556         if (rc) {
7557                 BNX2X_ERR("Setup leading failed!\n");
7558 #ifndef BNX2X_STOP_ON_ERROR
7559                 goto load_error3;
7560 #else
7561                 bp->panic = 1;
7562                 return -EBUSY;
7563 #endif
7564         }
7565
7566         if (CHIP_IS_E1H(bp))
7567                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7568                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7569                         bp->flags |= MF_FUNC_DIS;
7570                 }
7571
7572         if (bp->state == BNX2X_STATE_OPEN) {
7573 #ifdef BCM_CNIC
7574                 /* Enable Timer scan */
7575                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7576 #endif
7577                 for_each_nondefault_queue(bp, i) {
7578                         rc = bnx2x_setup_multi(bp, i);
7579                         if (rc)
7580 #ifdef BCM_CNIC
7581                                 goto load_error4;
7582 #else
7583                                 goto load_error3;
7584 #endif
7585                 }
7586
7587                 if (CHIP_IS_E1(bp))
7588                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7589                 else
7590                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7591 #ifdef BCM_CNIC
7592                 /* Set iSCSI L2 MAC */
7593                 mutex_lock(&bp->cnic_mutex);
7594                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7595                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7596                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7597                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7598                                       CNIC_SB_ID(bp));
7599                 }
7600                 mutex_unlock(&bp->cnic_mutex);
7601 #endif
7602         }
7603
7604         if (bp->port.pmf)
7605                 bnx2x_initial_phy_init(bp, load_mode);
7606
7607         /* Start fast path */
7608         switch (load_mode) {
7609         case LOAD_NORMAL:
7610                 if (bp->state == BNX2X_STATE_OPEN) {
7611                         /* Tx queues should only be re-enabled */
7612                         netif_tx_wake_all_queues(bp->dev);
7613                 }
7614                 /* Initialize the receive filter. */
7615                 bnx2x_set_rx_mode(bp->dev);
7616                 break;
7617
7618         case LOAD_OPEN:
7619                 netif_tx_start_all_queues(bp->dev);
7620                 if (bp->state != BNX2X_STATE_OPEN)
7621                         netif_tx_disable(bp->dev);
7622                 /* Initialize the receive filter. */
7623                 bnx2x_set_rx_mode(bp->dev);
7624                 break;
7625
7626         case LOAD_DIAG:
7627                 /* Initialize the receive filter. */
7628                 bnx2x_set_rx_mode(bp->dev);
7629                 bp->state = BNX2X_STATE_DIAG;
7630                 break;
7631
7632         default:
7633                 break;
7634         }
7635
7636         if (!bp->port.pmf)
7637                 bnx2x__link_status_update(bp);
7638
7639         /* start the timer */
7640         mod_timer(&bp->timer, jiffies + bp->current_interval);
7641
7642 #ifdef BCM_CNIC
7643         bnx2x_setup_cnic_irq_info(bp);
7644         if (bp->state == BNX2X_STATE_OPEN)
7645                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7646 #endif
7647
7648         return 0;
7649
7650 #ifdef BCM_CNIC
7651 load_error4:
7652         /* Disable Timer scan */
7653         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7654 #endif
7655 load_error3:
7656         bnx2x_int_disable_sync(bp, 1);
7657         if (!BP_NOMCP(bp)) {
7658                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7659                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7660         }
7661         bp->port.pmf = 0;
7662         /* Free SKBs, SGEs, TPA pool and driver internals */
7663         bnx2x_free_skbs(bp);
7664         for_each_queue(bp, i)
7665                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7666 load_error2:
7667         /* Release IRQs */
7668         bnx2x_free_irq(bp, false);
7669 load_error1:
7670         bnx2x_napi_disable(bp);
7671         for_each_queue(bp, i)
7672                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7673         bnx2x_free_mem(bp);
7674
7675         return rc;
7676 }
7677
7678 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7679 {
7680         struct bnx2x_fastpath *fp = &bp->fp[index];
7681         int rc;
7682
7683         /* halt the connection */
7684         fp->state = BNX2X_FP_STATE_HALTING;
7685         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7686
7687         /* Wait for completion */
7688         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7689                                &(fp->state), 1);
7690         if (rc) /* timeout */
7691                 return rc;
7692
7693         /* delete cfc entry */
7694         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7695
7696         /* Wait for completion */
7697         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7698                                &(fp->state), 1);
7699         return rc;
7700 }
7701
7702 static int bnx2x_stop_leading(struct bnx2x *bp)
7703 {
7704         __le16 dsb_sp_prod_idx;
7705         /* if the other port is handling traffic,
7706            this can take a lot of time */
7707         int cnt = 500;
7708         int rc;
7709
7710         might_sleep();
7711
7712         /* Send HALT ramrod */
7713         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7714         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7715
7716         /* Wait for completion */
7717         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7718                                &(bp->fp[0].state), 1);
7719         if (rc) /* timeout */
7720                 return rc;
7721
7722         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7723
7724         /* Send PORT_DELETE ramrod */
7725         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7726
7727         /* Wait for completion to arrive on default status block
7728            we are going to reset the chip anyway
7729            so there is not much to do if this times out
7730          */
7731         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7732                 if (!cnt) {
7733                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7734                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7735                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7736 #ifdef BNX2X_STOP_ON_ERROR
7737                         bnx2x_panic();
7738 #endif
7739                         rc = -EBUSY;
7740                         break;
7741                 }
7742                 cnt--;
7743                 msleep(1);
7744                 rmb(); /* Refresh the dsb_sp_prod */
7745         }
7746         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7747         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7748
7749         return rc;
7750 }
7751
7752 static void bnx2x_reset_func(struct bnx2x *bp)
7753 {
7754         int port = BP_PORT(bp);
7755         int func = BP_FUNC(bp);
7756         int base, i;
7757
7758         /* Configure IGU */
7759         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7760         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7761
7762 #ifdef BCM_CNIC
7763         /* Disable Timer scan */
7764         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7765         /*
7766          * Wait for at least 10ms and up to 2 seconds for the timers scan to
7767          * complete
7768          */
7769         for (i = 0; i < 200; i++) {
7770                 msleep(10);
7771                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7772                         break;
7773         }
7774 #endif
7775         /* Clear ILT */
7776         base = FUNC_ILT_BASE(func);
7777         for (i = base; i < base + ILT_PER_FUNC; i++)
7778                 bnx2x_ilt_wr(bp, i, 0);
7779 }
7780
7781 static void bnx2x_reset_port(struct bnx2x *bp)
7782 {
7783         int port = BP_PORT(bp);
7784         u32 val;
7785
7786         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7787
7788         /* Do not rcv packets to BRB */
7789         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7790         /* Do not direct rcv packets that are not for MCP to the BRB */
7791         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7792                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7793
7794         /* Configure AEU */
7795         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7796
7797         msleep(100);
7798         /* Check for BRB port occupancy */
7799         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7800         if (val)
7801                 DP(NETIF_MSG_IFDOWN,
7802                    "BRB1 is not empty  %d blocks are occupied\n", val);
7803
7804         /* TODO: Close Doorbell port? */
7805 }
7806
7807 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7808 {
7809         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7810            BP_FUNC(bp), reset_code);
7811
7812         switch (reset_code) {
7813         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7814                 bnx2x_reset_port(bp);
7815                 bnx2x_reset_func(bp);
7816                 bnx2x_reset_common(bp);
7817                 break;
7818
7819         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7820                 bnx2x_reset_port(bp);
7821                 bnx2x_reset_func(bp);
7822                 break;
7823
7824         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7825                 bnx2x_reset_func(bp);
7826                 break;
7827
7828         default:
7829                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7830                 break;
7831         }
7832 }
7833
7834 /* must be called with rtnl_lock */
7835 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7836 {
7837         int port = BP_PORT(bp);
7838         u32 reset_code = 0;
7839         int i, cnt, rc;
7840
7841 #ifdef BCM_CNIC
7842         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7843 #endif
7844         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7845
7846         /* Set "drop all" */
7847         bp->rx_mode = BNX2X_RX_MODE_NONE;
7848         bnx2x_set_storm_rx_mode(bp);
7849
7850         /* Disable HW interrupts, NAPI and Tx */
7851         bnx2x_netif_stop(bp, 1);
7852
7853         del_timer_sync(&bp->timer);
7854         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7855                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7856         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7857
7858         /* Release IRQs */
7859         bnx2x_free_irq(bp, false);
7860
7861         /* Wait until tx fastpath tasks complete */
7862         for_each_queue(bp, i) {
7863                 struct bnx2x_fastpath *fp = &bp->fp[i];
7864
7865                 cnt = 1000;
7866                 while (bnx2x_has_tx_work_unload(fp)) {
7867
7868                         bnx2x_tx_int(fp);
7869                         if (!cnt) {
7870                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7871                                           i);
7872 #ifdef BNX2X_STOP_ON_ERROR
7873                                 bnx2x_panic();
7874                                 return -EBUSY;
7875 #else
7876                                 break;
7877 #endif
7878                         }
7879                         cnt--;
7880                         msleep(1);
7881                 }
7882         }
7883         /* Give HW time to discard old tx messages */
7884         msleep(1);
7885
7886         if (CHIP_IS_E1(bp)) {
7887                 struct mac_configuration_cmd *config =
7888                                                 bnx2x_sp(bp, mcast_config);
7889
7890                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7891
7892                 for (i = 0; i < config->hdr.length; i++)
7893                         CAM_INVALIDATE(config->config_table[i]);
7894
7895                 config->hdr.length = i;
7896                 if (CHIP_REV_IS_SLOW(bp))
7897                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7898                 else
7899                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7900                 config->hdr.client_id = bp->fp->cl_id;
7901                 config->hdr.reserved1 = 0;
7902
7903                 bp->set_mac_pending++;
7904                 smp_wmb();
7905
7906                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7907                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7908                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7909
7910         } else { /* E1H */
7911                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7912
7913                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7914
7915                 for (i = 0; i < MC_HASH_SIZE; i++)
7916                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7917
7918                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7919         }
7920 #ifdef BCM_CNIC
7921         /* Clear iSCSI L2 MAC */
7922         mutex_lock(&bp->cnic_mutex);
7923         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7924                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7925                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7926         }
7927         mutex_unlock(&bp->cnic_mutex);
7928 #endif
7929
7930         if (unload_mode == UNLOAD_NORMAL)
7931                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7932
7933         else if (bp->flags & NO_WOL_FLAG)
7934                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7935
7936         else if (bp->wol) {
7937                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7938                 u8 *mac_addr = bp->dev->dev_addr;
7939                 u32 val;
7940                 /* The mac address is written to entries 1-4 to
7941                    preserve entry 0 which is used by the PMF */
7942                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7943
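                     /* the EMAC MAC-match pair stores the address split as
                      * the two high bytes in the first register and the
                      * remaining four bytes in the second
                      */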
7944                 val = (mac_addr[0] << 8) | mac_addr[1];
7945                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7946
7947                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7948                       (mac_addr[4] << 8) | mac_addr[5];
7949                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7950
7951                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7952
7953         } else
7954                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7955
7956         /* Close multi and leading connections.
7957            Completions for ramrods are collected synchronously */
7958         for_each_nondefault_queue(bp, i)
7959                 if (bnx2x_stop_multi(bp, i))
7960                         goto unload_error;
7961
7962         rc = bnx2x_stop_leading(bp);
7963         if (rc) {
7964                 BNX2X_ERR("Stop leading failed!\n");
7965 #ifdef BNX2X_STOP_ON_ERROR
7966                 return -EBUSY;
7967 #else
7968                 goto unload_error;
7969 #endif
7970         }
7971
7972 unload_error:
7973         if (!BP_NOMCP(bp))
7974                 reset_code = bnx2x_fw_command(bp, reset_code);
7975         else {
7976                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7977                    load_count[0], load_count[1], load_count[2]);
7978                 load_count[0]--;
7979                 load_count[1 + port]--;
7980                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7981                    load_count[0], load_count[1], load_count[2]);
7982                 if (load_count[0] == 0)
7983                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7984                 else if (load_count[1 + port] == 0)
7985                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7986                 else
7987                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7988         }
7989
7990         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7991             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7992                 bnx2x__link_reset(bp);
7993
7994         /* Reset the chip */
7995         bnx2x_reset_chip(bp, reset_code);
7996
7997         /* Report UNLOAD_DONE to MCP */
7998         if (!BP_NOMCP(bp))
7999                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8000
8001         bp->port.pmf = 0;
8002
8003         /* Free SKBs, SGEs, TPA pool and driver internals */
8004         bnx2x_free_skbs(bp);
8005         for_each_queue(bp, i)
8006                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8007         for_each_queue(bp, i)
8008                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8009         bnx2x_free_mem(bp);
8010
8011         bp->state = BNX2X_STATE_CLOSED;
8012
8013         netif_carrier_off(bp->dev);
8014
8015         return 0;
8016 }
8017
8018 static void bnx2x_reset_task(struct work_struct *work)
8019 {
8020         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8021
8022 #ifdef BNX2X_STOP_ON_ERROR
8023         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8024                   " so reset not done to allow debug dump,\n"
8025                   " you will need to reboot when done\n");
8026         return;
8027 #endif
8028
8029         rtnl_lock();
8030
8031         if (!netif_running(bp->dev))
8032                 goto reset_task_exit;
8033
8034         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8035         bnx2x_nic_load(bp, LOAD_NORMAL);
8036
8037 reset_task_exit:
8038         rtnl_unlock();
8039 }
8040
8041 /* end of nic load/unload */
8042
8043 /* ethtool_ops */
8044
8045 /*
8046  * Init service functions
8047  */
8048
8049 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8050 {
8051         switch (func) {
8052         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8053         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8054         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8055         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8056         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8057         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8058         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8059         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8060         default:
8061                 BNX2X_ERR("Unsupported function index: %d\n", func);
8062                 return (u32)(-1);
8063         }
8064 }
8065
8066 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8067 {
8068         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8069
8070         /* Flush all outstanding writes */
8071         mmiowb();
8072
8073         /* Pretend to be function 0 */
8074         REG_WR(bp, reg, 0);
8075         /* Flush the GRC transaction (in the chip) */
8076         new_val = REG_RD(bp, reg);
8077         if (new_val != 0) {
8078                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8079                           new_val);
8080                 BUG();
8081         }
8082
8083         /* From now we are in the "like-E1" mode */
8084         bnx2x_int_disable(bp);
8085
8086         /* Flush all outstanding writes */
8087         mmiowb();
8088
8089         /* Restore the original function settings */
8090         REG_WR(bp, reg, orig_func);
8091         new_val = REG_RD(bp, reg);
8092         if (new_val != orig_func) {
8093                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8094                           orig_func, new_val);
8095                 BUG();
8096         }
8097 }
8098
8099 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8100 {
8101         if (CHIP_IS_E1H(bp))
8102                 bnx2x_undi_int_disable_e1h(bp, func);
8103         else
8104                 bnx2x_int_disable(bp);
8105 }
8106
8107 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8108 {
8109         u32 val;
8110
8111         /* Check if there is any driver already loaded */
8112         val = REG_RD(bp, MISC_REG_UNPREPARED);
8113         if (val == 0x1) {
8114                 /* Check if it is the UNDI driver
8115                  * UNDI driver initializes CID offset for normal doorbell to 0x7
8116                  */
8117                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8118                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8119                 if (val == 0x7) {
8120                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8121                         /* save our func */
8122                         int func = BP_FUNC(bp);
8123                         u32 swap_en;
8124                         u32 swap_val;
8125
8126                         /* clear the UNDI indication */
8127                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8128
8129                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8130
8131                         /* try unload UNDI on port 0 */
8132                         bp->func = 0;
8133                         bp->fw_seq =
8134                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8135                                 DRV_MSG_SEQ_NUMBER_MASK);
8136                         reset_code = bnx2x_fw_command(bp, reset_code);
8137
8138                         /* if UNDI is loaded on the other port */
8139                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8140
8141                                 /* send "DONE" for previous unload */
8142                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8143
8144                                 /* unload UNDI on port 1 */
8145                                 bp->func = 1;
8146                                 bp->fw_seq =
8147                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8148                                         DRV_MSG_SEQ_NUMBER_MASK);
8149                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8150
8151                                 bnx2x_fw_command(bp, reset_code);
8152                         }
8153
8154                         /* now it's safe to release the lock */
8155                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8156
8157                         bnx2x_undi_int_disable(bp, func);
8158
8159                         /* close input traffic and wait for it */
8160                         /* Do not rcv packets to BRB */
8161                         REG_WR(bp,
8162                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8163                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8164                         /* Do not direct rcv packets that are not for MCP to
8165                          * the BRB */
8166                         REG_WR(bp,
8167                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8168                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8169                         /* clear AEU */
8170                         REG_WR(bp,
8171                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8172                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8173                         msleep(10);
8174
8175                         /* save NIG port swap info */
8176                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8177                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8178                         /* reset device */
8179                         REG_WR(bp,
8180                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8181                                0xd3ffffff);
8182                         REG_WR(bp,
8183                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8184                                0x1403);
8185                         /* take the NIG out of reset and restore swap values */
8186                         REG_WR(bp,
8187                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8188                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8189                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8190                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8191
8192                         /* send unload done to the MCP */
8193                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8194
8195                         /* restore our func and fw_seq */
8196                         bp->func = func;
8197                         bp->fw_seq =
8198                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8199                                 DRV_MSG_SEQ_NUMBER_MASK);
8200
8201                 } else
8202                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8203         }
8204 }
8205
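/*
 * Editor's note: the UNDI teardown above follows a save/reset/restore
 * idiom -- any state that must survive the GRC reset (here the NIG
 * port-swap straps) is read out first and written back once the block is
 * out of reset.  A minimal sketch of that pattern, assuming only the
 * REG_RD()/REG_WR() accessors used above; the helper name is hypothetical
 * and the snippet is not part of the driver:
 */
#if 0
static void example_reset_preserving_swap(struct bnx2x *bp)
{
        u32 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
        u32 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

        /* ... hit the MISC_REGISTERS_RESET_REG_* clear/set registers ... */

        REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
        REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
}
#endif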
8206 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8207 {
8208         u32 val, val2, val3, val4, id;
8209         u16 pmc;
8210
8211         /* Get the chip revision id and number. */
8212         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8213         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8214         id = ((val & 0xffff) << 16);
8215         val = REG_RD(bp, MISC_REG_CHIP_REV);
8216         id |= ((val & 0xf) << 12);
8217         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8218         id |= ((val & 0xff) << 4);
8219         val = REG_RD(bp, MISC_REG_BOND_ID);
8220         id |= (val & 0xf);
8221         bp->common.chip_id = id;
8222         bp->link_params.chip_id = bp->common.chip_id;
8223         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8224
8225         val = (REG_RD(bp, 0x2874) & 0x55);
8226         if ((bp->common.chip_id & 0x1) ||
8227             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8228                 bp->flags |= ONE_PORT_FLAG;
8229                 BNX2X_DEV_INFO("single port device\n");
8230         }
8231
8232         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8233         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8234                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8235         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8236                        bp->common.flash_size, bp->common.flash_size);
8237
8238         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8239         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8240         bp->link_params.shmem_base = bp->common.shmem_base;
8241         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8242                        bp->common.shmem_base, bp->common.shmem2_base);
8243
8244         if (!bp->common.shmem_base ||
8245             (bp->common.shmem_base < 0xA0000) ||
8246             (bp->common.shmem_base >= 0xC0000)) {
8247                 BNX2X_DEV_INFO("MCP not active\n");
8248                 bp->flags |= NO_MCP_FLAG;
8249                 return;
8250         }
8251
8252         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8253         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8255                 BNX2X_ERR("BAD MCP validity signature\n");
8256
8257         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8258         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8259
8260         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8261                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8262                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8263
8264         bp->link_params.feature_config_flags = 0;
8265         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8266         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8267                 bp->link_params.feature_config_flags |=
8268                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8269         else
8270                 bp->link_params.feature_config_flags &=
8271                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8272
8273         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8274         bp->common.bc_ver = val;
8275         BNX2X_DEV_INFO("bc_ver %X\n", val);
8276         if (val < BNX2X_BC_VER) {
8277                 /* for now only warn;
8278                  * later we might need to enforce this */
8279                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8280                           " please upgrade BC\n", BNX2X_BC_VER, val);
8281         }
8282         bp->link_params.feature_config_flags |=
8283                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8284                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8285
8286         if (BP_E1HVN(bp) == 0) {
8287                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8288                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8289         } else {
8290                 /* no WOL capability for E1HVN != 0 */
8291                 bp->flags |= NO_WOL_FLAG;
8292         }
8293         BNX2X_DEV_INFO("%sWoL capable\n",
8294                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8295
8296         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8297         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8298         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8299         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8300
8301         pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
8302 }
8303
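/*
 * Editor's note: a sketch of the chip_id packing performed in
 * bnx2x_get_common_hwinfo() above (chip num in bits 16-31, rev in 12-15,
 * metal in 4-11, bond_id in 0-3).  The helper name and the example
 * values are hypothetical:
 */
#if 0
static u32 example_pack_chip_id(u32 num, u32 rev, u32 metal, u32 bond)
{
        return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
               ((metal & 0xff) << 4) | (bond & 0xf);
        /* e.g. (0x164e, 0x1, 0x00, 0x0) -> 0x164e1000 */
}
#endif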
8304 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8305                                                     u32 switch_cfg)
8306 {
8307         int port = BP_PORT(bp);
8308         u32 ext_phy_type;
8309
8310         switch (switch_cfg) {
8311         case SWITCH_CFG_1G:
8312                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8313
8314                 ext_phy_type =
8315                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316                 switch (ext_phy_type) {
8317                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8318                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8319                                        ext_phy_type);
8320
8321                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8322                                                SUPPORTED_10baseT_Full |
8323                                                SUPPORTED_100baseT_Half |
8324                                                SUPPORTED_100baseT_Full |
8325                                                SUPPORTED_1000baseT_Full |
8326                                                SUPPORTED_2500baseX_Full |
8327                                                SUPPORTED_TP |
8328                                                SUPPORTED_FIBRE |
8329                                                SUPPORTED_Autoneg |
8330                                                SUPPORTED_Pause |
8331                                                SUPPORTED_Asym_Pause);
8332                         break;
8333
8334                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8335                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8336                                        ext_phy_type);
8337
8338                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8339                                                SUPPORTED_10baseT_Full |
8340                                                SUPPORTED_100baseT_Half |
8341                                                SUPPORTED_100baseT_Full |
8342                                                SUPPORTED_1000baseT_Full |
8343                                                SUPPORTED_TP |
8344                                                SUPPORTED_FIBRE |
8345                                                SUPPORTED_Autoneg |
8346                                                SUPPORTED_Pause |
8347                                                SUPPORTED_Asym_Pause);
8348                         break;
8349
8350                 default:
8351                         BNX2X_ERR("NVRAM config error. "
8352                                   "BAD SerDes ext_phy_config 0x%x\n",
8353                                   bp->link_params.ext_phy_config);
8354                         return;
8355                 }
8356
8357                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8358                                            port*0x10);
8359                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8360                 break;
8361
8362         case SWITCH_CFG_10G:
8363                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8364
8365                 ext_phy_type =
8366                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8367                 switch (ext_phy_type) {
8368                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8369                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8370                                        ext_phy_type);
8371
8372                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8373                                                SUPPORTED_10baseT_Full |
8374                                                SUPPORTED_100baseT_Half |
8375                                                SUPPORTED_100baseT_Full |
8376                                                SUPPORTED_1000baseT_Full |
8377                                                SUPPORTED_2500baseX_Full |
8378                                                SUPPORTED_10000baseT_Full |
8379                                                SUPPORTED_TP |
8380                                                SUPPORTED_FIBRE |
8381                                                SUPPORTED_Autoneg |
8382                                                SUPPORTED_Pause |
8383                                                SUPPORTED_Asym_Pause);
8384                         break;
8385
8386                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8388                                        ext_phy_type);
8389
8390                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8391                                                SUPPORTED_1000baseT_Full |
8392                                                SUPPORTED_FIBRE |
8393                                                SUPPORTED_Autoneg |
8394                                                SUPPORTED_Pause |
8395                                                SUPPORTED_Asym_Pause);
8396                         break;
8397
8398                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8399                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8400                                        ext_phy_type);
8401
8402                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8403                                                SUPPORTED_2500baseX_Full |
8404                                                SUPPORTED_1000baseT_Full |
8405                                                SUPPORTED_FIBRE |
8406                                                SUPPORTED_Autoneg |
8407                                                SUPPORTED_Pause |
8408                                                SUPPORTED_Asym_Pause);
8409                         break;
8410
8411                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8412                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8413                                        ext_phy_type);
8414
8415                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8416                                                SUPPORTED_FIBRE |
8417                                                SUPPORTED_Pause |
8418                                                SUPPORTED_Asym_Pause);
8419                         break;
8420
8421                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8422                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8423                                        ext_phy_type);
8424
8425                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8426                                                SUPPORTED_1000baseT_Full |
8427                                                SUPPORTED_FIBRE |
8428                                                SUPPORTED_Pause |
8429                                                SUPPORTED_Asym_Pause);
8430                         break;
8431
8432                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8433                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8434                                        ext_phy_type);
8435
8436                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8437                                                SUPPORTED_1000baseT_Full |
8438                                                SUPPORTED_Autoneg |
8439                                                SUPPORTED_FIBRE |
8440                                                SUPPORTED_Pause |
8441                                                SUPPORTED_Asym_Pause);
8442                         break;
8443
8444                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8445                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8446                                        ext_phy_type);
8447
8448                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449                                                SUPPORTED_1000baseT_Full |
8450                                                SUPPORTED_Autoneg |
8451                                                SUPPORTED_FIBRE |
8452                                                SUPPORTED_Pause |
8453                                                SUPPORTED_Asym_Pause);
8454                         break;
8455
8456                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8457                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8458                                        ext_phy_type);
8459
8460                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8461                                                SUPPORTED_TP |
8462                                                SUPPORTED_Autoneg |
8463                                                SUPPORTED_Pause |
8464                                                SUPPORTED_Asym_Pause);
8465                         break;
8466
8467                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8468                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8469                                        ext_phy_type);
8470
8471                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8472                                                SUPPORTED_10baseT_Full |
8473                                                SUPPORTED_100baseT_Half |
8474                                                SUPPORTED_100baseT_Full |
8475                                                SUPPORTED_1000baseT_Full |
8476                                                SUPPORTED_10000baseT_Full |
8477                                                SUPPORTED_TP |
8478                                                SUPPORTED_Autoneg |
8479                                                SUPPORTED_Pause |
8480                                                SUPPORTED_Asym_Pause);
8481                         break;
8482
8483                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8484                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8485                                   bp->link_params.ext_phy_config);
8486                         break;
8487
8488                 default:
8489                         BNX2X_ERR("NVRAM config error. "
8490                                   "BAD XGXS ext_phy_config 0x%x\n",
8491                                   bp->link_params.ext_phy_config);
8492                         return;
8493                 }
8494
8495                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8496                                            port*0x18);
8497                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8498
8499                 break;
8500
8501         default:
8502                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8503                           bp->port.link_config);
8504                 return;
8505         }
8506         bp->link_params.phy_addr = bp->port.phy_addr;
8507
8508         /* mask what we support according to speed_cap_mask */
8509         if (!(bp->link_params.speed_cap_mask &
8510                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8511                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8512
8513         if (!(bp->link_params.speed_cap_mask &
8514                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8515                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8516
8517         if (!(bp->link_params.speed_cap_mask &
8518                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8519                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8520
8521         if (!(bp->link_params.speed_cap_mask &
8522                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8523                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8524
8525         if (!(bp->link_params.speed_cap_mask &
8526                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8527                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8528                                         SUPPORTED_1000baseT_Full);
8529
8530         if (!(bp->link_params.speed_cap_mask &
8531                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8532                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8533
8534         if (!(bp->link_params.speed_cap_mask &
8535                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8536                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8537
8538         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8539 }
8540
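/*
 * Editor's note: the speed_cap_mask handling above is a fixed chain of
 * "if the capability bit is absent, clear the matching SUPPORTED_*
 * flags".  A table-driven restatement of the same logic; the example_*
 * names are hypothetical, everything else is from this file:
 */
#if 0
static const struct {
        u32 cap_bit;            /* PORT_HW_CFG_SPEED_CAPABILITY_D0_* */
        u32 supported;          /* SUPPORTED_* flags it gates */
} example_cap_map[] = {
        { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF, SUPPORTED_10baseT_Half },
        { PORT_HW_CFG_SPEED_CAPABILITY_D0_10G, SUPPORTED_10000baseT_Full },
        /* ... remaining speeds elided ... */
};

static void example_mask_supported(struct bnx2x *bp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(example_cap_map); i++)
                if (!(bp->link_params.speed_cap_mask &
                      example_cap_map[i].cap_bit))
                        bp->port.supported &= ~example_cap_map[i].supported;
}
#endif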
8541 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8542 {
8543         bp->link_params.req_duplex = DUPLEX_FULL;
8544
8545         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8546         case PORT_FEATURE_LINK_SPEED_AUTO:
8547                 if (bp->port.supported & SUPPORTED_Autoneg) {
8548                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8549                         bp->port.advertising = bp->port.supported;
8550                 } else {
8551                         u32 ext_phy_type =
8552                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8553
8554                         if ((ext_phy_type ==
8555                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8556                             (ext_phy_type ==
8557                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8558                                 /* force 10G, no AN */
8559                                 bp->link_params.req_line_speed = SPEED_10000;
8560                                 bp->port.advertising =
8561                                                 (ADVERTISED_10000baseT_Full |
8562                                                  ADVERTISED_FIBRE);
8563                                 break;
8564                         }
8565                         BNX2X_ERR("NVRAM config error. "
8566                                   "Invalid link_config 0x%x"
8567                                   "  Autoneg not supported\n",
8568                                   bp->port.link_config);
8569                         return;
8570                 }
8571                 break;
8572
8573         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8574                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8575                         bp->link_params.req_line_speed = SPEED_10;
8576                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8577                                                 ADVERTISED_TP);
8578                 } else {
8579                         BNX2X_ERR("NVRAM config error. "
8580                                   "Invalid link_config 0x%x"
8581                                   "  speed_cap_mask 0x%x\n",
8582                                   bp->port.link_config,
8583                                   bp->link_params.speed_cap_mask);
8584                         return;
8585                 }
8586                 break;
8587
8588         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8589                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8590                         bp->link_params.req_line_speed = SPEED_10;
8591                         bp->link_params.req_duplex = DUPLEX_HALF;
8592                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8593                                                 ADVERTISED_TP);
8594                 } else {
8595                         BNX2X_ERR("NVRAM config error. "
8596                                   "Invalid link_config 0x%x"
8597                                   "  speed_cap_mask 0x%x\n",
8598                                   bp->port.link_config,
8599                                   bp->link_params.speed_cap_mask);
8600                         return;
8601                 }
8602                 break;
8603
8604         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8605                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8606                         bp->link_params.req_line_speed = SPEED_100;
8607                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8608                                                 ADVERTISED_TP);
8609                 } else {
8610                         BNX2X_ERR("NVRAM config error. "
8611                                   "Invalid link_config 0x%x"
8612                                   "  speed_cap_mask 0x%x\n",
8613                                   bp->port.link_config,
8614                                   bp->link_params.speed_cap_mask);
8615                         return;
8616                 }
8617                 break;
8618
8619         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8620                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8621                         bp->link_params.req_line_speed = SPEED_100;
8622                         bp->link_params.req_duplex = DUPLEX_HALF;
8623                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8624                                                 ADVERTISED_TP);
8625                 } else {
8626                         BNX2X_ERR("NVRAM config error. "
8627                                   "Invalid link_config 0x%x"
8628                                   "  speed_cap_mask 0x%x\n",
8629                                   bp->port.link_config,
8630                                   bp->link_params.speed_cap_mask);
8631                         return;
8632                 }
8633                 break;
8634
8635         case PORT_FEATURE_LINK_SPEED_1G:
8636                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8637                         bp->link_params.req_line_speed = SPEED_1000;
8638                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8639                                                 ADVERTISED_TP);
8640                 } else {
8641                         BNX2X_ERR("NVRAM config error. "
8642                                   "Invalid link_config 0x%x"
8643                                   "  speed_cap_mask 0x%x\n",
8644                                   bp->port.link_config,
8645                                   bp->link_params.speed_cap_mask);
8646                         return;
8647                 }
8648                 break;
8649
8650         case PORT_FEATURE_LINK_SPEED_2_5G:
8651                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8652                         bp->link_params.req_line_speed = SPEED_2500;
8653                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8654                                                 ADVERTISED_TP);
8655                 } else {
8656                         BNX2X_ERR("NVRAM config error. "
8657                                   "Invalid link_config 0x%x"
8658                                   "  speed_cap_mask 0x%x\n",
8659                                   bp->port.link_config,
8660                                   bp->link_params.speed_cap_mask);
8661                         return;
8662                 }
8663                 break;
8664
8665         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8666         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8667         case PORT_FEATURE_LINK_SPEED_10G_KR:
8668                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8669                         bp->link_params.req_line_speed = SPEED_10000;
8670                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8671                                                 ADVERTISED_FIBRE);
8672                 } else {
8673                         BNX2X_ERR("NVRAM config error. "
8674                                   "Invalid link_config 0x%x"
8675                                   "  speed_cap_mask 0x%x\n",
8676                                   bp->port.link_config,
8677                                   bp->link_params.speed_cap_mask);
8678                         return;
8679                 }
8680                 break;
8681
8682         default:
8683                 BNX2X_ERR("NVRAM config error. "
8684                           "BAD link speed link_config 0x%x\n",
8685                           bp->port.link_config);
8686                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8687                 bp->port.advertising = bp->port.supported;
8688                 break;
8689         }
8690
8691         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8692                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8693         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8694             !(bp->port.supported & SUPPORTED_Autoneg))
8695                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8696
8697         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8698                        "  advertising 0x%x\n",
8699                        bp->link_params.req_line_speed,
8700                        bp->link_params.req_duplex,
8701                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8702 }
8703
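/*
 * Editor's note: the flow control resolution at the end of
 * bnx2x_link_settings_requested() in one step -- a requested
 * BNX2X_FLOW_CTRL_AUTO is only honoured when the port can autonegotiate,
 * otherwise it degrades to BNX2X_FLOW_CTRL_NONE.  Condensed sketch with
 * hypothetical local names:
 */
#if 0
        fc = link_config & PORT_FEATURE_FLOW_CONTROL_MASK;
        if ((fc == BNX2X_FLOW_CTRL_AUTO) && !(supported & SUPPORTED_Autoneg))
                fc = BNX2X_FLOW_CTRL_NONE;      /* AUTO needs autoneg */
#endif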
8704 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8705 {
8706         mac_hi = cpu_to_be16(mac_hi);
8707         mac_lo = cpu_to_be32(mac_lo);
8708         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8710 }
8711
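/*
 * Editor's note: a worked example of bnx2x_set_mac_buf() above (values
 * hypothetical).  mac_hi carries the two high bytes and mac_lo the four
 * low bytes; both are converted to big-endian so the buffer ends up in
 * wire order:
 *
 *      mac_hi = 0x0050, mac_lo = 0xc2001122
 *      mac_buf[] = { 0x00, 0x50, 0xc2, 0x00, 0x11, 0x22 }
 *      i.e. the station address 00:50:c2:00:11:22
 */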
8712 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8713 {
8714         int port = BP_PORT(bp);
8715         u32 val, val2;
8716         u32 config;
8717         u16 i;
8718         u32 ext_phy_type;
8719
8720         bp->link_params.bp = bp;
8721         bp->link_params.port = port;
8722
8723         bp->link_params.lane_config =
8724                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8725         bp->link_params.ext_phy_config =
8726                 SHMEM_RD(bp,
8727                          dev_info.port_hw_config[port].external_phy_config);
8728         /* BCM8727_NOC => BCM8727, no over-current */
8729         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8730             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8731                 bp->link_params.ext_phy_config &=
8732                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8733                 bp->link_params.ext_phy_config |=
8734                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8735                 bp->link_params.feature_config_flags |=
8736                         FEATURE_CONFIG_BCM8727_NOC;
8737         }
8738
8739         bp->link_params.speed_cap_mask =
8740                 SHMEM_RD(bp,
8741                          dev_info.port_hw_config[port].speed_capability_mask);
8742
8743         bp->port.link_config =
8744                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8745
8746         /* Get the 4 lanes' XGXS config, rx and tx */
8747         for (i = 0; i < 2; i++) {
8748                 val = SHMEM_RD(bp,
8749                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8750                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8751                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8752
8753                 val = SHMEM_RD(bp,
8754                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8755                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8756                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8757         }
8758
8759         /* If the device is capable of WoL, set the default state according
8760          * to the HW
8761          */
8762         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8763         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8764                    (config & PORT_FEATURE_WOL_ENABLED));
8765
8766         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8767                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8768                        bp->link_params.lane_config,
8769                        bp->link_params.ext_phy_config,
8770                        bp->link_params.speed_cap_mask, bp->port.link_config);
8771
8772         bp->link_params.switch_cfg |= (bp->port.link_config &
8773                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8774         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8775
8776         bnx2x_link_settings_requested(bp);
8777
8778         /*
8779          * If connected directly, work with the internal PHY; otherwise,
8780          * work with the external PHY
8781          */
8782         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8783         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8784                 bp->mdio.prtad = bp->link_params.phy_addr;
8785
8786         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8787                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8788                 bp->mdio.prtad =
8789                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8790
8791         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8792         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8793         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8794         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8795         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8796
8797 #ifdef BCM_CNIC
8798         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8799         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8800         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8801 #endif
8802 }
8803
8804 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8805 {
8806         int func = BP_FUNC(bp);
8807         u32 val, val2;
8808         int rc = 0;
8809
8810         bnx2x_get_common_hwinfo(bp);
8811
8812         bp->e1hov = 0;
8813         bp->e1hmf = 0;
8814         if (CHIP_IS_E1H(bp)) {
8815                 bp->mf_config =
8816                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8817
8818                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8819                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8820                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8821                         bp->e1hmf = 1;
8822                 BNX2X_DEV_INFO("%s function mode\n",
8823                                IS_E1HMF(bp) ? "multi" : "single");
8824
8825                 if (IS_E1HMF(bp)) {
8826                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8827                                                                 e1hov_tag) &
8828                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8829                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8830                                 bp->e1hov = val;
8831                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8832                                                "(0x%04x)\n",
8833                                                func, bp->e1hov, bp->e1hov);
8834                         } else {
8835                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8836                                           "  aborting\n", func);
8837                                 rc = -EPERM;
8838                         }
8839                 } else {
8840                         if (BP_E1HVN(bp)) {
8841                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8842                                           "  aborting\n", BP_E1HVN(bp));
8843                                 rc = -EPERM;
8844                         }
8845                 }
8846         }
8847
8848         if (!BP_NOMCP(bp)) {
8849                 bnx2x_get_port_hwinfo(bp);
8850
8851                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852                               DRV_MSG_SEQ_NUMBER_MASK);
8853                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8854         }
8855
8856         if (IS_E1HMF(bp)) {
8857                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8859                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8866                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8868                                ETH_ALEN);
8869                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8870                                ETH_ALEN);
8871                 }
8872
8873                 return rc;
8874         }
8875
8876         if (BP_NOMCP(bp)) {
8877                 /* only supposed to happen on emulation/FPGA */
8878                 BNX2X_ERR("warning: random MAC workaround active\n");
8879                 random_ether_addr(bp->dev->dev_addr);
8880                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8881         }
8882
8883         return rc;
8884 }
8885
8886 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8887 {
8888         int func = BP_FUNC(bp);
8889         int timer_interval;
8890         int rc;
8891
8892         /* Disable interrupt handling until HW is initialized */
8893         atomic_set(&bp->intr_sem, 1);
8894         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8895
8896         mutex_init(&bp->port.phy_mutex);
8897         mutex_init(&bp->fw_mb_mutex);
8898 #ifdef BCM_CNIC
8899         mutex_init(&bp->cnic_mutex);
8900 #endif
8901
8902         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8903         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8904
8905         rc = bnx2x_get_hwinfo(bp);
8906
8907         /* need to reset chip if UNDI was active */
8908         if (!BP_NOMCP(bp))
8909                 bnx2x_undi_unload(bp);
8910
8911         if (CHIP_REV_IS_FPGA(bp))
8912                 pr_err("FPGA detected\n");
8913
8914         if (BP_NOMCP(bp) && (func == 0))
8915                 pr_err("MCP disabled, must load devices in order!\n");
8916
8917         /* Set multi queue mode */
8918         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8919             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8920                 pr_err("Multi disabled since int_mode requested is not MSI-X\n");
8921                 multi_mode = ETH_RSS_MODE_DISABLED;
8922         }
8923         bp->multi_mode = multi_mode;
8924
8925
8926         /* Set TPA flags */
8927         if (disable_tpa) {
8928                 bp->flags &= ~TPA_ENABLE_FLAG;
8929                 bp->dev->features &= ~NETIF_F_LRO;
8930         } else {
8931                 bp->flags |= TPA_ENABLE_FLAG;
8932                 bp->dev->features |= NETIF_F_LRO;
8933         }
8934
8935         if (CHIP_IS_E1(bp))
8936                 bp->dropless_fc = 0;
8937         else
8938                 bp->dropless_fc = dropless_fc;
8939
8940         bp->mrrs = mrrs;
8941
8942         bp->tx_ring_size = MAX_TX_AVAIL;
8943         bp->rx_ring_size = MAX_RX_AVAIL;
8944
8945         bp->rx_csum = 1;
8946
8947         /* make sure that the numbers are in the right granularity */
8948         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8949         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8950
8951         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8952         bp->current_interval = (poll ? poll : timer_interval);
8953
8954         init_timer(&bp->timer);
8955         bp->timer.expires = jiffies + bp->current_interval;
8956         bp->timer.data = (unsigned long) bp;
8957         bp->timer.function = bnx2x_timer;
8958
8959         return rc;
8960 }
8961
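/*
 * Editor's note: the tx_ticks/rx_ticks defaults above round the wanted
 * value down to the controller's coalescing granularity of
 * (4 * BNX2X_BTR) usec using integer division.  Sketch (helper name
 * hypothetical):
 */
#if 0
static int example_round_to_btr(int want_usec)
{
        int unit = 4 * BNX2X_BTR;       /* timer granularity */

        return (want_usec / unit) * unit;   /* largest multiple <= want */
}
#endif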
8962 /*
8963  * ethtool service functions
8964  */
8965
8966 /* All ethtool functions called with rtnl_lock */
8967
8968 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8969 {
8970         struct bnx2x *bp = netdev_priv(dev);
8971
8972         cmd->supported = bp->port.supported;
8973         cmd->advertising = bp->port.advertising;
8974
8975         if ((bp->state == BNX2X_STATE_OPEN) &&
8976             !(bp->flags & MF_FUNC_DIS) &&
8977             (bp->link_vars.link_up)) {
8978                 cmd->speed = bp->link_vars.line_speed;
8979                 cmd->duplex = bp->link_vars.duplex;
8980                 if (IS_E1HMF(bp)) {
8981                         u16 vn_max_rate;
8982
8983                         vn_max_rate =
8984                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8985                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8986                         if (vn_max_rate < cmd->speed)
8987                                 cmd->speed = vn_max_rate;
8988                 }
8989         } else {
8990                 cmd->speed = -1;
8991                 cmd->duplex = -1;
8992         }
8993
8994         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8995                 u32 ext_phy_type =
8996                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8997
8998                 switch (ext_phy_type) {
8999                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9000                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9001                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9002                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9004                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9005                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9006                         cmd->port = PORT_FIBRE;
9007                         break;
9008
9009                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9010                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9011                         cmd->port = PORT_TP;
9012                         break;
9013
9014                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9015                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9016                                   bp->link_params.ext_phy_config);
9017                         break;
9018
9019                 default:
9020                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9021                            bp->link_params.ext_phy_config);
9022                         break;
9023                 }
9024         } else
9025                 cmd->port = PORT_TP;
9026
9027         cmd->phy_address = bp->mdio.prtad;
9028         cmd->transceiver = XCVR_INTERNAL;
9029
9030         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9031                 cmd->autoneg = AUTONEG_ENABLE;
9032         else
9033                 cmd->autoneg = AUTONEG_DISABLE;
9034
9035         cmd->maxtxpkt = 0;
9036         cmd->maxrxpkt = 0;
9037
9038         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9039            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9040            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9041            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9042            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9043            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9044            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9045
9046         return 0;
9047 }
9048
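/*
 * Editor's note: in E1HMF mode the speed reported above is clamped to
 * the per-VN maximum bandwidth, which mf_config stores in units of
 * 100 Mbps.  Worked example (values hypothetical): a max-BW field of 25
 * gives vn_max_rate = 25 * 100 = 2500, so a 10000 Mbps line_speed is
 * reported to ethtool as 2500.
 */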
9049 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9050 {
9051         struct bnx2x *bp = netdev_priv(dev);
9052         u32 advertising;
9053
9054         if (IS_E1HMF(bp))
9055                 return 0;
9056
9057         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9058            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9059            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9060            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9061            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9062            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9063            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9064
9065         if (cmd->autoneg == AUTONEG_ENABLE) {
9066                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9067                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9068                         return -EINVAL;
9069                 }
9070
9071                 /* advertise the requested speed and duplex if supported */
9072                 cmd->advertising &= bp->port.supported;
9073
9074                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9075                 bp->link_params.req_duplex = DUPLEX_FULL;
9076                 bp->port.advertising |= (ADVERTISED_Autoneg |
9077                                          cmd->advertising);
9078
9079         } else { /* forced speed */
9080                 /* advertise the requested speed and duplex if supported */
9081                 switch (cmd->speed) {
9082                 case SPEED_10:
9083                         if (cmd->duplex == DUPLEX_FULL) {
9084                                 if (!(bp->port.supported &
9085                                       SUPPORTED_10baseT_Full)) {
9086                                         DP(NETIF_MSG_LINK,
9087                                            "10M full not supported\n");
9088                                         return -EINVAL;
9089                                 }
9090
9091                                 advertising = (ADVERTISED_10baseT_Full |
9092                                                ADVERTISED_TP);
9093                         } else {
9094                                 if (!(bp->port.supported &
9095                                       SUPPORTED_10baseT_Half)) {
9096                                         DP(NETIF_MSG_LINK,
9097                                            "10M half not supported\n");
9098                                         return -EINVAL;
9099                                 }
9100
9101                                 advertising = (ADVERTISED_10baseT_Half |
9102                                                ADVERTISED_TP);
9103                         }
9104                         break;
9105
9106                 case SPEED_100:
9107                         if (cmd->duplex == DUPLEX_FULL) {
9108                                 if (!(bp->port.supported &
9109                                                 SUPPORTED_100baseT_Full)) {
9110                                         DP(NETIF_MSG_LINK,
9111                                            "100M full not supported\n");
9112                                         return -EINVAL;
9113                                 }
9114
9115                                 advertising = (ADVERTISED_100baseT_Full |
9116                                                ADVERTISED_TP);
9117                         } else {
9118                                 if (!(bp->port.supported &
9119                                                 SUPPORTED_100baseT_Half)) {
9120                                         DP(NETIF_MSG_LINK,
9121                                            "100M half not supported\n");
9122                                         return -EINVAL;
9123                                 }
9124
9125                                 advertising = (ADVERTISED_100baseT_Half |
9126                                                ADVERTISED_TP);
9127                         }
9128                         break;
9129
9130                 case SPEED_1000:
9131                         if (cmd->duplex != DUPLEX_FULL) {
9132                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9133                                 return -EINVAL;
9134                         }
9135
9136                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9137                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9138                                 return -EINVAL;
9139                         }
9140
9141                         advertising = (ADVERTISED_1000baseT_Full |
9142                                        ADVERTISED_TP);
9143                         break;
9144
9145                 case SPEED_2500:
9146                         if (cmd->duplex != DUPLEX_FULL) {
9147                                 DP(NETIF_MSG_LINK,
9148                                    "2.5G half not supported\n");
9149                                 return -EINVAL;
9150                         }
9151
9152                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9153                                 DP(NETIF_MSG_LINK,
9154                                    "2.5G full not supported\n");
9155                                 return -EINVAL;
9156                         }
9157
9158                         advertising = (ADVERTISED_2500baseX_Full |
9159                                        ADVERTISED_TP);
9160                         break;
9161
9162                 case SPEED_10000:
9163                         if (cmd->duplex != DUPLEX_FULL) {
9164                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9165                                 return -EINVAL;
9166                         }
9167
9168                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9169                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9170                                 return -EINVAL;
9171                         }
9172
9173                         advertising = (ADVERTISED_10000baseT_Full |
9174                                        ADVERTISED_FIBRE);
9175                         break;
9176
9177                 default:
9178                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9179                         return -EINVAL;
9180                 }
9181
9182                 bp->link_params.req_line_speed = cmd->speed;
9183                 bp->link_params.req_duplex = cmd->duplex;
9184                 bp->port.advertising = advertising;
9185         }
9186
9187         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9188            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9189            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9190            bp->port.advertising);
9191
9192         if (netif_running(dev)) {
9193                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9194                 bnx2x_link_set(bp);
9195         }
9196
9197         return 0;
9198 }
9199
9200 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9201 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9202
9203 static int bnx2x_get_regs_len(struct net_device *dev)
9204 {
9205         struct bnx2x *bp = netdev_priv(dev);
9206         int regdump_len = 0;
9207         int i;
9208
9209         if (CHIP_IS_E1(bp)) {
9210                 for (i = 0; i < REGS_COUNT; i++)
9211                         if (IS_E1_ONLINE(reg_addrs[i].info))
9212                                 regdump_len += reg_addrs[i].size;
9213
9214                 for (i = 0; i < WREGS_COUNT_E1; i++)
9215                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9216                                 regdump_len += wreg_addrs_e1[i].size *
9217                                         (1 + wreg_addrs_e1[i].read_regs_count);
9218
9219         } else { /* E1H */
9220                 for (i = 0; i < REGS_COUNT; i++)
9221                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9222                                 regdump_len += reg_addrs[i].size;
9223
9224                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9225                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9226                                 regdump_len += wreg_addrs_e1h[i].size *
9227                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9228         }
9229         regdump_len *= 4;
9230         regdump_len += sizeof(struct dump_hdr);
9231
9232         return regdump_len;
9233 }
9234
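/*
 * Editor's note: the count in bnx2x_get_regs_len() above is accumulated
 * in dwords -- a plain register block contributes its size, a wide-bus
 * block contributes size * (1 + read_regs_count) -- then "*= 4" scales
 * dwords to bytes and the dump header is added on top.  E.g.
 * (hypothetical) 1000 online dwords -> 4000 bytes +
 * sizeof(struct dump_hdr).
 */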
9235 static void bnx2x_get_regs(struct net_device *dev,
9236                            struct ethtool_regs *regs, void *_p)
9237 {
9238         u32 *p = _p, i, j;
9239         struct bnx2x *bp = netdev_priv(dev);
9240         struct dump_hdr dump_hdr = {0};
9241
9242         regs->version = 0;
9243         memset(p, 0, regs->len);
9244
9245         if (!netif_running(bp->dev))
9246                 return;
9247
9248         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9249         dump_hdr.dump_sign = dump_sign_all;
9250         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9251         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9252         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9253         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9254         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9255
9256         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9257         p += dump_hdr.hdr_size + 1;
9258
9259         if (CHIP_IS_E1(bp)) {
9260                 for (i = 0; i < REGS_COUNT; i++)
9261                         if (IS_E1_ONLINE(reg_addrs[i].info))
9262                                 for (j = 0; j < reg_addrs[i].size; j++)
9263                                         *p++ = REG_RD(bp,
9264                                                       reg_addrs[i].addr + j*4);
9265
9266         } else { /* E1H */
9267                 for (i = 0; i < REGS_COUNT; i++)
9268                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9269                                 for (j = 0; j < reg_addrs[i].size; j++)
9270                                         *p++ = REG_RD(bp,
9271                                                       reg_addrs[i].addr + j*4);
9272         }
9273 }
9274
9275 #define PHY_FW_VER_LEN                  10
9276
9277 static void bnx2x_get_drvinfo(struct net_device *dev,
9278                               struct ethtool_drvinfo *info)
9279 {
9280         struct bnx2x *bp = netdev_priv(dev);
9281         u8 phy_fw_ver[PHY_FW_VER_LEN];
9282
9283         strcpy(info->driver, DRV_MODULE_NAME);
9284         strcpy(info->version, DRV_MODULE_VERSION);
9285
9286         phy_fw_ver[0] = '\0';
9287         if (bp->port.pmf) {
9288                 bnx2x_acquire_phy_lock(bp);
9289                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9290                                              (bp->state != BNX2X_STATE_CLOSED),
9291                                              phy_fw_ver, PHY_FW_VER_LEN);
9292                 bnx2x_release_phy_lock(bp);
9293         }
9294
9295         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9296                  (bp->common.bc_ver & 0xff0000) >> 16,
9297                  (bp->common.bc_ver & 0xff00) >> 8,
9298                  (bp->common.bc_ver & 0xff),
9299                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9300         strcpy(info->bus_info, pci_name(bp->pdev));
9301         info->n_stats = BNX2X_NUM_STATS;
9302         info->testinfo_len = BNX2X_NUM_TESTS;
9303         info->eedump_len = bp->common.flash_size;
9304         info->regdump_len = bnx2x_get_regs_len(dev);
9305 }
9306
9307 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9308 {
9309         struct bnx2x *bp = netdev_priv(dev);
9310
9311         if (bp->flags & NO_WOL_FLAG) {
9312                 wol->supported = 0;
9313                 wol->wolopts = 0;
9314         } else {
9315                 wol->supported = WAKE_MAGIC;
9316                 if (bp->wol)
9317                         wol->wolopts = WAKE_MAGIC;
9318                 else
9319                         wol->wolopts = 0;
9320         }
9321         memset(&wol->sopass, 0, sizeof(wol->sopass));
9322 }
9323
9324 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9325 {
9326         struct bnx2x *bp = netdev_priv(dev);
9327
9328         if (wol->wolopts & ~WAKE_MAGIC)
9329                 return -EINVAL;
9330
9331         if (wol->wolopts & WAKE_MAGIC) {
9332                 if (bp->flags & NO_WOL_FLAG)
9333                         return -EINVAL;
9334
9335                 bp->wol = 1;
9336         } else
9337                 bp->wol = 0;
9338
9339         return 0;
9340 }
9341
9342 static u32 bnx2x_get_msglevel(struct net_device *dev)
9343 {
9344         struct bnx2x *bp = netdev_priv(dev);
9345
9346         return bp->msg_enable;
9347 }
9348
9349 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9350 {
9351         struct bnx2x *bp = netdev_priv(dev);
9352
9353         if (capable(CAP_NET_ADMIN))
9354                 bp->msg_enable = level;
9355 }
9356
9357 static int bnx2x_nway_reset(struct net_device *dev)
9358 {
9359         struct bnx2x *bp = netdev_priv(dev);
9360
9361         if (!bp->port.pmf)
9362                 return 0;
9363
9364         if (netif_running(dev)) {
9365                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9366                 bnx2x_link_set(bp);
9367         }
9368
9369         return 0;
9370 }
9371
9372 static u32 bnx2x_get_link(struct net_device *dev)
9373 {
9374         struct bnx2x *bp = netdev_priv(dev);
9375
9376         if (bp->flags & MF_FUNC_DIS)
9377                 return 0;
9378
9379         return bp->link_vars.link_up;
9380 }
9381
9382 static int bnx2x_get_eeprom_len(struct net_device *dev)
9383 {
9384         struct bnx2x *bp = netdev_priv(dev);
9385
9386         return bp->common.flash_size;
9387 }
9388
9389 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9390 {
9391         int port = BP_PORT(bp);
9392         int count, i;
9393         u32 val = 0;
9394
9395         /* adjust timeout for emulation/FPGA */
9396         count = NVRAM_TIMEOUT_COUNT;
9397         if (CHIP_REV_IS_SLOW(bp))
9398                 count *= 100;
9399
9400         /* request access to nvram interface */
9401         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9402                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9403
9404         for (i = 0; i < count*10; i++) {
9405                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9406                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9407                         break;
9408
9409                 udelay(5);
9410         }
9411
9412         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9413                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9414                 return -EBUSY;
9415         }
9416
9417         return 0;
9418 }
9419
9420 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9421 {
9422         int port = BP_PORT(bp);
9423         int count, i;
9424         u32 val = 0;
9425
9426         /* adjust timeout for emulation/FPGA */
9427         count = NVRAM_TIMEOUT_COUNT;
9428         if (CHIP_REV_IS_SLOW(bp))
9429                 count *= 100;
9430
9431         /* relinquish nvram interface */
9432         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9433                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9434
9435         for (i = 0; i < count*10; i++) {
9436                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9437                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9438                         break;
9439
9440                 udelay(5);
9441         }
9442
9443         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9444                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9445                 return -EBUSY;
9446         }
9447
9448         return 0;
9449 }
9450
9451 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9452 {
9453         u32 val;
9454
9455         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9456
9457         /* enable both bits, even on read */
9458         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9459                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9460                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9461 }
9462
9463 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9464 {
9465         u32 val;
9466
9467         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9468
9469         /* disable both bits, even after read */
9470         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9471                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9472                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9473 }
9474
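/* Single-dword NVRAM read: clear the stale DONE bit, program the byte
 * address, kick the command with DOIT, then poll DONE.  The data register
 * returns the dword in CPU order; it is converted to big-endian so that
 * ethtool sees the flash as a plain byte array. */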
9475 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9476                                   u32 cmd_flags)
9477 {
9478         int count, i, rc;
9479         u32 val;
9480
9481         /* build the command word */
9482         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9483
9484         /* need to clear DONE bit separately */
9485         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9486
9487         /* address of the NVRAM to read from */
9488         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9489                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9490
9491         /* issue a read command */
9492         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9493
9494         /* adjust timeout for emulation/FPGA */
9495         count = NVRAM_TIMEOUT_COUNT;
9496         if (CHIP_REV_IS_SLOW(bp))
9497                 count *= 100;
9498
9499         /* wait for completion */
9500         *ret_val = 0;
9501         rc = -EBUSY;
9502         for (i = 0; i < count; i++) {
9503                 udelay(5);
9504                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9505
9506                 if (val & MCPR_NVM_COMMAND_DONE) {
9507                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9508                         /* we read nvram data in cpu order,
9509                          * but ethtool sees it as an array of bytes;
9510                          * converting to big-endian does the job */

9511                         *ret_val = cpu_to_be32(val);
9512                         rc = 0;
9513                         break;
9514                 }
9515         }
9516
9517         return rc;
9518 }
9519
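/* Buffered NVRAM read: the transfer is split into dword commands, with
 * MCPR_NVM_COMMAND_FIRST raised on the first dword and MCPR_NVM_COMMAND_LAST
 * on the final one.  Offset and length must be dword-aligned and inside the
 * flash, and the lock/access-enable pair brackets the whole transfer. */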
9520 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9521                             int buf_size)
9522 {
9523         int rc;
9524         u32 cmd_flags;
9525         __be32 val;
9526
9527         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9528                 DP(BNX2X_MSG_NVM,
9529                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9530                    offset, buf_size);
9531                 return -EINVAL;
9532         }
9533
9534         if (offset + buf_size > bp->common.flash_size) {
9535                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9536                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9537                    offset, buf_size, bp->common.flash_size);
9538                 return -EINVAL;
9539         }
9540
9541         /* request access to nvram interface */
9542         rc = bnx2x_acquire_nvram_lock(bp);
9543         if (rc)
9544                 return rc;
9545
9546         /* enable access to nvram interface */
9547         bnx2x_enable_nvram_access(bp);
9548
9549         /* read the first word(s) */
9550         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9551         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9552                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9553                 memcpy(ret_buf, &val, 4);
9554
9555                 /* advance to the next dword */
9556                 offset += sizeof(u32);
9557                 ret_buf += sizeof(u32);
9558                 buf_size -= sizeof(u32);
9559                 cmd_flags = 0;
9560         }
9561
9562         if (rc == 0) {
9563                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9564                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9565                 memcpy(ret_buf, &val, 4);
9566         }
9567
9568         /* disable access to nvram interface */
9569         bnx2x_disable_nvram_access(bp);
9570         bnx2x_release_nvram_lock(bp);
9571
9572         return rc;
9573 }
9574
9575 static int bnx2x_get_eeprom(struct net_device *dev,
9576                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9577 {
9578         struct bnx2x *bp = netdev_priv(dev);
9579         int rc;
9580
9581         if (!netif_running(dev))
9582                 return -EAGAIN;
9583
9584         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9585            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9586            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9587            eeprom->len, eeprom->len);
9588
9589         /* parameters already validated in ethtool_get_eeprom */
9590
9591         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9592
9593         return rc;
9594 }
9595
9596 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9597                                    u32 cmd_flags)
9598 {
9599         int count, i, rc;
9600
9601         /* build the command word */
9602         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9603
9604         /* need to clear DONE bit separately */
9605         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9606
9607         /* write the data */
9608         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9609
9610         /* address of the NVRAM to write to */
9611         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9612                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9613
9614         /* issue the write command */
9615         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9616
9617         /* adjust timeout for emulation/FPGA */
9618         count = NVRAM_TIMEOUT_COUNT;
9619         if (CHIP_REV_IS_SLOW(bp))
9620                 count *= 100;
9621
9622         /* wait for completion */
9623         rc = -EBUSY;
9624         for (i = 0; i < count; i++) {
9625                 udelay(5);
9626                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9627                 if (val & MCPR_NVM_COMMAND_DONE) {
9628                         rc = 0;
9629                         break;
9630                 }
9631         }
9632
9633         return rc;
9634 }
9635
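/* Single-byte NVRAM write, used by ethtool for sub-dword updates: read the
 * aligned dword back (in big-endian byte-array order), splice the new byte
 * in at BYTE_OFFSET(offset), convert to CPU order, and write the whole
 * dword back as a combined FIRST+LAST command. */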
9636 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9637
9638 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9639                               int buf_size)
9640 {
9641         int rc;
9642         u32 cmd_flags;
9643         u32 align_offset;
9644         __be32 val;
9645
9646         if (offset + buf_size > bp->common.flash_size) {
9647                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9648                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9649                    offset, buf_size, bp->common.flash_size);
9650                 return -EINVAL;
9651         }
9652
9653         /* request access to nvram interface */
9654         rc = bnx2x_acquire_nvram_lock(bp);
9655         if (rc)
9656                 return rc;
9657
9658         /* enable access to nvram interface */
9659         bnx2x_enable_nvram_access(bp);
9660
9661         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9662         align_offset = (offset & ~0x03);
9663         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9664
9665         if (rc == 0) {
9666                 val &= ~(0xff << BYTE_OFFSET(offset));
9667                 val |= (*data_buf << BYTE_OFFSET(offset));
9668
9669                 /* nvram data is returned as an array of bytes
9670                  * convert it back to cpu order */
9671                 val = be32_to_cpu(val);
9672
9673                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9674                                              cmd_flags);
9675         }
9676
9677         /* disable access to nvram interface */
9678         bnx2x_disable_nvram_access(bp);
9679         bnx2x_release_nvram_lock(bp);
9680
9681         return rc;
9682 }
9683
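/* Buffered NVRAM write: single-byte requests are diverted to
 * bnx2x_nvram_write1(); everything else must be dword-aligned.  The FIRST
 * and LAST command flags are raised not only at the buffer boundaries but
 * also at every NVRAM_PAGE_SIZE boundary, so each flash page gets its own
 * complete command sequence. */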
9684 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9685                              int buf_size)
9686 {
9687         int rc;
9688         u32 cmd_flags;
9689         u32 val;
9690         u32 written_so_far;
9691
9692         if (buf_size == 1)      /* ethtool */
9693                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9694
9695         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9696                 DP(BNX2X_MSG_NVM,
9697                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9698                    offset, buf_size);
9699                 return -EINVAL;
9700         }
9701
9702         if (offset + buf_size > bp->common.flash_size) {
9703                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9704                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9705                    offset, buf_size, bp->common.flash_size);
9706                 return -EINVAL;
9707         }
9708
9709         /* request access to nvram interface */
9710         rc = bnx2x_acquire_nvram_lock(bp);
9711         if (rc)
9712                 return rc;
9713
9714         /* enable access to nvram interface */
9715         bnx2x_enable_nvram_access(bp);
9716
9717         written_so_far = 0;
9718         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9719         while ((written_so_far < buf_size) && (rc == 0)) {
9720                 if (written_so_far == (buf_size - sizeof(u32)))
9721                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9722                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9723                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9724                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9725                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9726
9727                 memcpy(&val, data_buf, 4);
9728
9729                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9730
9731                 /* advance to the next dword */
9732                 offset += sizeof(u32);
9733                 data_buf += sizeof(u32);
9734                 written_so_far += sizeof(u32);
9735                 cmd_flags = 0;
9736         }
9737
9738         /* disable access to nvram interface */
9739         bnx2x_disable_nvram_access(bp);
9740         bnx2x_release_nvram_lock(bp);
9741
9742         return rc;
9743 }
9744
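/* ethtool eeprom writes double as the PHY firmware-upgrade channel: magics
 * in the 0x504859xx ('PHY'..) range are reserved for the PMF -- 'PHYP'
 * prepares the PHY for the upgrade, 'PHYR' re-inits the link once the new
 * firmware is in, and a third magic marks the upgrade as completed
 * (SFX7101 only).  Any other magic falls through to a plain NVRAM write. */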
9745 static int bnx2x_set_eeprom(struct net_device *dev,
9746                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9747 {
9748         struct bnx2x *bp = netdev_priv(dev);
9749         int port = BP_PORT(bp);
9750         int rc = 0;
9751
9752         if (!netif_running(dev))
9753                 return -EAGAIN;
9754
9755         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9756            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9757            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9758            eeprom->len, eeprom->len);
9759
9760         /* parameters already validated in ethtool_set_eeprom */
9761
9762         /* PHY eeprom can be accessed only by the PMF */
9763         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9764             !bp->port.pmf)
9765                 return -EINVAL;
9766
9767         if (eeprom->magic == 0x50485950) {
9768                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9769                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9770
9771                 bnx2x_acquire_phy_lock(bp);
9772                 rc |= bnx2x_link_reset(&bp->link_params,
9773                                        &bp->link_vars, 0);
9774                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9775                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9776                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9777                                        MISC_REGISTERS_GPIO_HIGH, port);
9778                 bnx2x_release_phy_lock(bp);
9779                 bnx2x_link_report(bp);
9780
9781         } else if (eeprom->magic == 0x50485952) {
9782                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9783                 if (bp->state == BNX2X_STATE_OPEN) {
9784                         bnx2x_acquire_phy_lock(bp);
9785                         rc |= bnx2x_link_reset(&bp->link_params,
9786                                                &bp->link_vars, 1);
9787
9788                         rc |= bnx2x_phy_init(&bp->link_params,
9789                                              &bp->link_vars);
9790                         bnx2x_release_phy_lock(bp);
9791                         bnx2x_calc_fc_adv(bp);
9792                 }
9793         } else if (eeprom->magic == 0x53985943) {
9794                 /* 'PHYC' (0x53985943; ASCII 'PHYC' would be 0x50485943): PHY FW upgrade completed */
9795                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9796                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9797                         u8 ext_phy_addr =
9798                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9799
9800                         /* take the DSP out of download mode */
9801                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9802                                        MISC_REGISTERS_GPIO_LOW, port);
9803
9804                         bnx2x_acquire_phy_lock(bp);
9805
9806                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9807
9808                         /* wait 0.5 sec to allow it to run */
9809                         msleep(500);
9810                         bnx2x_ext_phy_hw_reset(bp, port);
9811                         msleep(500);
9812                         bnx2x_release_phy_lock(bp);
9813                 }
9814         } else
9815                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9816
9817         return rc;
9818 }
9819
9820 static int bnx2x_get_coalesce(struct net_device *dev,
9821                               struct ethtool_coalesce *coal)
9822 {
9823         struct bnx2x *bp = netdev_priv(dev);
9824
9825         memset(coal, 0, sizeof(struct ethtool_coalesce));
9826
9827         coal->rx_coalesce_usecs = bp->rx_ticks;
9828         coal->tx_coalesce_usecs = bp->tx_ticks;
9829
9830         return 0;
9831 }
9832
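/* The clamp below mirrors the hardware: bnx2x_update_coalesce() programs
 * the HC timers in 12us ticks through an 8-bit field, so 0xf0 * 12 appears
 * to be the largest timeout that still fits (an assumption based on the
 * /12 scaling done at update time). */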
9833 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximum coalescing timeout in us */
9834 static int bnx2x_set_coalesce(struct net_device *dev,
9835                               struct ethtool_coalesce *coal)
9836 {
9837         struct bnx2x *bp = netdev_priv(dev);
9838
9839         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9840         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9841                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9842
9843         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9844         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9845                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9846
9847         if (netif_running(dev))
9848                 bnx2x_update_coalesce(bp);
9849
9850         return 0;
9851 }
9852
9853 static void bnx2x_get_ringparam(struct net_device *dev,
9854                                 struct ethtool_ringparam *ering)
9855 {
9856         struct bnx2x *bp = netdev_priv(dev);
9857
9858         ering->rx_max_pending = MAX_RX_AVAIL;
9859         ering->rx_mini_max_pending = 0;
9860         ering->rx_jumbo_max_pending = 0;
9861
9862         ering->rx_pending = bp->rx_ring_size;
9863         ering->rx_mini_pending = 0;
9864         ering->rx_jumbo_pending = 0;
9865
9866         ering->tx_max_pending = MAX_TX_AVAIL;
9867         ering->tx_pending = bp->tx_ring_size;
9868 }
9869
9870 static int bnx2x_set_ringparam(struct net_device *dev,
9871                                struct ethtool_ringparam *ering)
9872 {
9873         struct bnx2x *bp = netdev_priv(dev);
9874         int rc = 0;
9875
9876         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9877             (ering->tx_pending > MAX_TX_AVAIL) ||
9878             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9879                 return -EINVAL;
9880
9881         bp->rx_ring_size = ering->rx_pending;
9882         bp->tx_ring_size = ering->tx_pending;
9883
9884         if (netif_running(dev)) {
9885                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9886                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9887         }
9888
9889         return rc;
9890 }
9891
9892 static void bnx2x_get_pauseparam(struct net_device *dev,
9893                                  struct ethtool_pauseparam *epause)
9894 {
9895         struct bnx2x *bp = netdev_priv(dev);
9896
9897         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9898                            BNX2X_FLOW_CTRL_AUTO) &&
9899                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9900
9901         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9902                             BNX2X_FLOW_CTRL_RX);
9903         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9904                             BNX2X_FLOW_CTRL_TX);
9905
9906         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9907            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9908            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9909 }
9910
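/* Flow control is rebuilt from scratch on each call: start from AUTO, OR
 * in RX/TX as requested, and fall back to NONE if neither was requested.
 * A true AUTO setting is only kept when pause autoneg is asked for and the
 * line speed itself is auto-negotiated. */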
9911 static int bnx2x_set_pauseparam(struct net_device *dev,
9912                                 struct ethtool_pauseparam *epause)
9913 {
9914         struct bnx2x *bp = netdev_priv(dev);
9915
9916         if (IS_E1HMF(bp))
9917                 return 0;
9918
9919         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9920            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9921            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9922
9923         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9924
9925         if (epause->rx_pause)
9926                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9927
9928         if (epause->tx_pause)
9929                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9930
9931         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9932                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9933
9934         if (epause->autoneg) {
9935                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9936                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9937                         return -EINVAL;
9938                 }
9939
9940                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9941                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9942         }
9943
9944         DP(NETIF_MSG_LINK,
9945            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9946
9947         if (netif_running(dev)) {
9948                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9949                 bnx2x_link_set(bp);
9950         }
9951
9952         return 0;
9953 }
9954
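
/* ETH_FLAG_LRO maps onto the chip's TPA (transparent packet aggregation):
 * NETIF_F_LRO and TPA_ENABLE_FLAG are kept in lock-step, TPA is refused
 * when it is globally disabled (disable_tpa) or Rx checksumming is off,
 * and any change requires a full unload/reload of the NIC to take effect. */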
9955 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9956 {
9957         struct bnx2x *bp = netdev_priv(dev);
9958         int changed = 0;
9959         int rc = 0;
9960
9961         /* TPA requires Rx CSUM offloading */
9962         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9963                 if (!disable_tpa) {
9964                         if (!(dev->features & NETIF_F_LRO)) {
9965                                 dev->features |= NETIF_F_LRO;
9966                                 bp->flags |= TPA_ENABLE_FLAG;
9967                                 changed = 1;
9968                         }
9969                 } else
9970                         rc = -EINVAL;
9971         } else if (dev->features & NETIF_F_LRO) {
9972                 dev->features &= ~NETIF_F_LRO;
9973                 bp->flags &= ~TPA_ENABLE_FLAG;
9974                 changed = 1;
9975         }
9976
9977         if (changed && netif_running(dev)) {
9978                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9980         }
9981
9982         return rc;
9983 }
9984
9985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9986 {
9987         struct bnx2x *bp = netdev_priv(dev);
9988
9989         return bp->rx_csum;
9990 }
9991
9992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9993 {
9994         struct bnx2x *bp = netdev_priv(dev);
9995         int rc = 0;
9996
9997         bp->rx_csum = data;
9998
9999         /* Disable TPA when Rx CSUM is disabled; otherwise all
10000            TPA'ed packets would be discarded due to a wrong TCP CSUM */
10001         if (!data) {
10002                 u32 flags = ethtool_op_get_flags(dev);
10003
10004                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10005         }
10006
10007         return rc;
10008 }
10009
10010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10011 {
10012         if (data) {
10013                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10014                 dev->features |= NETIF_F_TSO6;
10015         } else {
10016                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10017                 dev->features &= ~NETIF_F_TSO6;
10018         }
10019
10020         return 0;
10021 }
10022
10023 static const struct {
10024         char string[ETH_GSTRING_LEN];
10025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10026         { "register_test (offline)" },
10027         { "memory_test (offline)" },
10028         { "loopback_test (offline)" },
10029         { "nvram_test (online)" },
10030         { "interrupt_test (online)" },
10031         { "link_test (online)" },
10032         { "idle check (online)" }
10033 };
10034
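/* Register self-test: for each entry, offset0 is the port-0 address and
 * offset1 the per-port stride.  The original value is saved, an all-zeros
 * and then an all-ones pattern is written, and the readback is compared
 * under the entry's mask of implemented bits before the old value is
 * restored. */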
10035 static int bnx2x_test_registers(struct bnx2x *bp)
10036 {
10037         int idx, i, rc = -ENODEV;
10038         u32 wr_val = 0;
10039         int port = BP_PORT(bp);
10040         static const struct {
10041                 u32  offset0;
10042                 u32  offset1;
10043                 u32  mask;
10044         } reg_tbl[] = {
10045 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10046                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10047                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10048                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10049                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10050                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10051                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10052                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10053                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10054                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10055 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10056                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10057                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10058                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10059                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10060                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10061                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10062                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10063                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10064                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10065 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10066                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10067                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10068                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10069                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10070                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10071                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10072                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10073                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10074                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10075 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10076                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10077                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10078                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10079                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10080                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10081                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10082
10083                 { 0xffffffff, 0, 0x00000000 }
10084         };
10085
10086         if (!netif_running(bp->dev))
10087                 return rc;
10088
10089         /* Run the test twice:
10090            first writing 0x00000000, then writing 0xffffffff */
10091         for (idx = 0; idx < 2; idx++) {
10092
10093                 switch (idx) {
10094                 case 0:
10095                         wr_val = 0;
10096                         break;
10097                 case 1:
10098                         wr_val = 0xffffffff;
10099                         break;
10100                 }
10101
10102                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10103                         u32 offset, mask, save_val, val;
10104
10105                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10106                         mask = reg_tbl[i].mask;
10107
10108                         save_val = REG_RD(bp, offset);
10109
10110                         REG_WR(bp, offset, wr_val);
10111                         val = REG_RD(bp, offset);
10112
10113                         /* Restore the original register's value */
10114                         REG_WR(bp, offset, save_val);
10115
10116                         /* verify the value reads back as expected */
10117                         if ((val & mask) != (wr_val & mask))
10118                                 goto test_reg_exit;
10119                 }
10120         }
10121
10122         rc = 0;
10123
10124 test_reg_exit:
10125         return rc;
10126 }
10127
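/* Memory self-test: every word of each listed on-chip memory is read back,
 * presumably to exercise the parity logic, and the parity status registers
 * are then required to be clean apart from the bits masked out per chip
 * revision (E1 vs E1H). */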
10128 static int bnx2x_test_memory(struct bnx2x *bp)
10129 {
10130         int i, j, rc = -ENODEV;
10131         u32 val;
10132         static const struct {
10133                 u32 offset;
10134                 int size;
10135         } mem_tbl[] = {
10136                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10137                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10138                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10139                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10140                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10141                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10142                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10143
10144                 { 0xffffffff, 0 }
10145         };
10146         static const struct {
10147                 char *name;
10148                 u32 offset;
10149                 u32 e1_mask;
10150                 u32 e1h_mask;
10151         } prty_tbl[] = {
10152                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10153                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10154                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10155                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10156                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10157                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10158
10159                 { NULL, 0xffffffff, 0, 0 }
10160         };
10161
10162         if (!netif_running(bp->dev))
10163                 return rc;
10164
10165         /* Go through all the memories */
10166         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10167                 for (j = 0; j < mem_tbl[i].size; j++)
10168                         REG_RD(bp, mem_tbl[i].offset + j*4);
10169
10170         /* Check the parity status */
10171         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10172                 val = REG_RD(bp, prty_tbl[i].offset);
10173                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10174                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10175                         DP(NETIF_MSG_HW,
10176                            "%s is 0x%x\n", prty_tbl[i].name, val);
10177                         goto test_mem_exit;
10178                 }
10179         }
10180
10181         rc = 0;
10182
10183 test_mem_exit:
10184         return rc;
10185 }
10186
10187 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10188 {
10189         int cnt = 1000;
10190
10191         if (link_up)
10192                 while (bnx2x_link_test(bp) && cnt--)
10193                         msleep(10);
10194 }
10195
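/* Loopback self-test on queue 0: a test frame (our own MAC as destination,
 * an incrementing byte pattern as payload) is posted as a start BD plus an
 * empty parse BD, the doorbell is rung, and after a short delay the TX and
 * RX consumer indices must each have advanced by exactly one packet.  The
 * received CQE and payload are then checked byte for byte.  (The misspelled
 * ETH_RX_ERROR_FALGS below is the macro's real name in the driver headers.) */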
10196 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10197 {
10198         unsigned int pkt_size, num_pkts, i;
10199         struct sk_buff *skb;
10200         unsigned char *packet;
10201         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10202         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10203         u16 tx_start_idx, tx_idx;
10204         u16 rx_start_idx, rx_idx;
10205         u16 pkt_prod, bd_prod;
10206         struct sw_tx_bd *tx_buf;
10207         struct eth_tx_start_bd *tx_start_bd;
10208         struct eth_tx_parse_bd *pbd = NULL;
10209         dma_addr_t mapping;
10210         union eth_rx_cqe *cqe;
10211         u8 cqe_fp_flags;
10212         struct sw_rx_bd *rx_buf;
10213         u16 len;
10214         int rc = -ENODEV;
10215
10216         /* check the loopback mode */
10217         switch (loopback_mode) {
10218         case BNX2X_PHY_LOOPBACK:
10219                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10220                         return -EINVAL;
10221                 break;
10222         case BNX2X_MAC_LOOPBACK:
10223                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10224                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10225                 break;
10226         default:
10227                 return -EINVAL;
10228         }
10229
10230         /* prepare the loopback packet */
10231         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10232                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10233         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10234         if (!skb) {
10235                 rc = -ENOMEM;
10236                 goto test_loopback_exit;
10237         }
10238         packet = skb_put(skb, pkt_size);
10239         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10240         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10241         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10242         for (i = ETH_HLEN; i < pkt_size; i++)
10243                 packet[i] = (unsigned char) (i & 0xff);
10244
10245         /* send the loopback packet */
10246         num_pkts = 0;
10247         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10248         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10249
10250         pkt_prod = fp_tx->tx_pkt_prod++;
10251         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10252         tx_buf->first_bd = fp_tx->tx_bd_prod;
10253         tx_buf->skb = skb;
10254         tx_buf->flags = 0;
10255
10256         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10257         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10258         mapping = pci_map_single(bp->pdev, skb->data,
10259                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10260         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10261         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10262         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10263         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10264         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10265         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10266         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10267                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10268
10269         /* turn on parsing and get a BD */
10270         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10271         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10272
10273         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10274
10275         wmb();
10276
10277         fp_tx->tx_db.data.prod += 2;
10278         barrier();
10279         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10280
10281         mmiowb();
10282
10283         num_pkts++;
10284         fp_tx->tx_bd_prod += 2; /* start + pbd */
10285
10286         udelay(100);
10287
10288         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10289         if (tx_idx != tx_start_idx + num_pkts)
10290                 goto test_loopback_exit;
10291
10292         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10293         if (rx_idx != rx_start_idx + num_pkts)
10294                 goto test_loopback_exit;
10295
10296         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10297         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10298         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10299                 goto test_loopback_rx_exit;
10300
10301         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10302         if (len != pkt_size)
10303                 goto test_loopback_rx_exit;
10304
10305         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10306         skb = rx_buf->skb;
10307         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10308         for (i = ETH_HLEN; i < pkt_size; i++)
10309                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10310                         goto test_loopback_rx_exit;
10311
10312         rc = 0;
10313
10314 test_loopback_rx_exit:
10315
10316         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10317         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10318         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10319         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10320
10321         /* Update producers */
10322         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10323                              fp_rx->rx_sge_prod);
10324
10325 test_loopback_exit:
10326         bp->link_params.loopback_mode = LOOPBACK_NONE;
10327
10328         return rc;
10329 }
10330
10331 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10332 {
10333         int rc = 0, res;
10334
10335         if (!netif_running(bp->dev))
10336                 return BNX2X_LOOPBACK_FAILED;
10337
10338         bnx2x_netif_stop(bp, 1);
10339         bnx2x_acquire_phy_lock(bp);
10340
10341         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10342         if (res) {
10343                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10344                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10345         }
10346
10347         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10348         if (res) {
10349                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10350                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10351         }
10352
10353         bnx2x_release_phy_lock(bp);
10354         bnx2x_netif_start(bp);
10355
10356         return rc;
10357 }
10358
10359 #define CRC32_RESIDUAL                  0xdebb20e3
10360
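/* Each NVRAM region listed below carries its own CRC32.  Running the CRC
 * over the region's data including the stored checksum must yield the fixed
 * CRC-32 residual 0xdebb20e3 for an intact region, which is what the loop
 * checks against CRC32_RESIDUAL. */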
10361 static int bnx2x_test_nvram(struct bnx2x *bp)
10362 {
10363         static const struct {
10364                 int offset;
10365                 int size;
10366         } nvram_tbl[] = {
10367                 {     0,  0x14 }, /* bootstrap */
10368                 {  0x14,  0xec }, /* dir */
10369                 { 0x100, 0x350 }, /* manuf_info */
10370                 { 0x450,  0xf0 }, /* feature_info */
10371                 { 0x640,  0x64 }, /* upgrade_key_info */
10372                 { 0x6a4,  0x64 },
10373                 { 0x708,  0x70 }, /* manuf_key_info */
10374                 { 0x778,  0x70 },
10375                 {     0,     0 }
10376         };
10377         __be32 buf[0x350 / 4];
10378         u8 *data = (u8 *)buf;
10379         int i, rc;
10380         u32 magic, crc;
10381
10382         rc = bnx2x_nvram_read(bp, 0, data, 4);
10383         if (rc) {
10384                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10385                 goto test_nvram_exit;
10386         }
10387
10388         magic = be32_to_cpu(buf[0]);
10389         if (magic != 0x669955aa) {
10390                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10391                 rc = -ENODEV;
10392                 goto test_nvram_exit;
10393         }
10394
10395         for (i = 0; nvram_tbl[i].size; i++) {
10396
10397                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10398                                       nvram_tbl[i].size);
10399                 if (rc) {
10400                         DP(NETIF_MSG_PROBE,
10401                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10402                         goto test_nvram_exit;
10403                 }
10404
10405                 crc = ether_crc_le(nvram_tbl[i].size, data);
10406                 if (crc != CRC32_RESIDUAL) {
10407                         DP(NETIF_MSG_PROBE,
10408                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10409                         rc = -ENODEV;
10410                         goto test_nvram_exit;
10411                 }
10412         }
10413
10414 test_nvram_exit:
10415         return rc;
10416 }
10417
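/* Interrupt self-test: post a harmless SET_MAC ramrod on the slowpath
 * queue and wait for its completion interrupt to clear set_mac_pending;
 * if that does not happen within roughly 100ms (10 x 10ms polls), the
 * interrupt path is declared broken. */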
10418 static int bnx2x_test_intr(struct bnx2x *bp)
10419 {
10420         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10421         int i, rc;
10422
10423         if (!netif_running(bp->dev))
10424                 return -ENODEV;
10425
10426         config->hdr.length = 0;
10427         if (CHIP_IS_E1(bp))
10428                 /* use last unicast entries */
10429                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
10430         else
10431                 config->hdr.offset = BP_FUNC(bp);
10432         config->hdr.client_id = bp->fp->cl_id;
10433         config->hdr.reserved1 = 0;
10434
10435         bp->set_mac_pending++;
10436         smp_wmb();
10437         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10438                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10439                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10440         if (rc == 0) {
10441                 for (i = 0; i < 10; i++) {
10442                         if (!bp->set_mac_pending)
10443                                 break;
10444                         smp_rmb();
10445                         msleep_interruptible(10);
10446                 }
10447                 if (i == 10)
10448                         rc = -ENODEV;
10449         }
10450
10451         return rc;
10452 }
10453
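/* ethtool self-test entry point.  buf[] slots follow bnx2x_tests_str_arr:
 * 0 registers, 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link.  The
 * offline tests need the NIC reloaded in diagnostic mode and are skipped
 * entirely in E1H multi-function mode; UMP input on the TX port IF is
 * disabled around them, presumably to keep management traffic out of the
 * way. */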
10454 static void bnx2x_self_test(struct net_device *dev,
10455                             struct ethtool_test *etest, u64 *buf)
10456 {
10457         struct bnx2x *bp = netdev_priv(dev);
10458
10459         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10460
10461         if (!netif_running(dev))
10462                 return;
10463
10464         /* offline tests are not supported in MF mode */
10465         if (IS_E1HMF(bp))
10466                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10467
10468         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10469                 int port = BP_PORT(bp);
10470                 u32 val;
10471                 u8 link_up;
10472
10473                 /* save current value of input enable for TX port IF */
10474                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10475                 /* disable input for TX port IF */
10476                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10477
10478                 link_up = (bnx2x_link_test(bp) == 0);
10479                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10480                 bnx2x_nic_load(bp, LOAD_DIAG);
10481                 /* wait until link state is restored */
10482                 bnx2x_wait_for_link(bp, link_up);
10483
10484                 if (bnx2x_test_registers(bp) != 0) {
10485                         buf[0] = 1;
10486                         etest->flags |= ETH_TEST_FL_FAILED;
10487                 }
10488                 if (bnx2x_test_memory(bp) != 0) {
10489                         buf[1] = 1;
10490                         etest->flags |= ETH_TEST_FL_FAILED;
10491                 }
10492                 buf[2] = bnx2x_test_loopback(bp, link_up);
10493                 if (buf[2] != 0)
10494                         etest->flags |= ETH_TEST_FL_FAILED;
10495
10496                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10497
10498                 /* restore input for TX port IF */
10499                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10500
10501                 bnx2x_nic_load(bp, LOAD_NORMAL);
10502                 /* wait until link state is restored */
10503                 bnx2x_wait_for_link(bp, link_up);
10504         }
10505         if (bnx2x_test_nvram(bp) != 0) {
10506                 buf[3] = 1;
10507                 etest->flags |= ETH_TEST_FL_FAILED;
10508         }
10509         if (bnx2x_test_intr(bp) != 0) {
10510                 buf[4] = 1;
10511                 etest->flags |= ETH_TEST_FL_FAILED;
10512         }
10513         if (bp->port.pmf)
10514                 if (bnx2x_link_test(bp) != 0) {
10515                         buf[5] = 1;
10516                         etest->flags |= ETH_TEST_FL_FAILED;
10517                 }
10518
10519 #ifdef BNX2X_EXTRA_DEBUG
10520         bnx2x_panic_dump(bp);
10521 #endif
10522 }
10523
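/* Statistics tables: each entry gives a counter's offset (in u32 units)
 * into the queue/global stats struct and its width -- 4-byte counters are
 * copied directly, 8-byte counters are stored as hi/lo dword pairs and
 * recombined with HILO_U64.  The global table additionally flags whether a
 * counter is per-port, per-function, or both. */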
10524 static const struct {
10525         long offset;
10526         int size;
10527         u8 string[ETH_GSTRING_LEN];
10528 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10529 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10530         { Q_STATS_OFFSET32(error_bytes_received_hi),
10531                                                 8, "[%d]: rx_error_bytes" },
10532         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10533                                                 8, "[%d]: rx_ucast_packets" },
10534         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10535                                                 8, "[%d]: rx_mcast_packets" },
10536         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10537                                                 8, "[%d]: rx_bcast_packets" },
10538         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10539         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10540                                          4, "[%d]: rx_phy_ip_err_discards"},
10541         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10542                                          4, "[%d]: rx_skb_alloc_discard" },
10543         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10544
10545 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10546         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10547                                                         8, "[%d]: tx_packets" }
10548 };
10549
10550 static const struct {
10551         long offset;
10552         int size;
10553         u32 flags;
10554 #define STATS_FLAGS_PORT                1
10555 #define STATS_FLAGS_FUNC                2
10556 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10557         u8 string[ETH_GSTRING_LEN];
10558 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10559 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10560                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10561         { STATS_OFFSET32(error_bytes_received_hi),
10562                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10563         { STATS_OFFSET32(total_unicast_packets_received_hi),
10564                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10565         { STATS_OFFSET32(total_multicast_packets_received_hi),
10566                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10567         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10568                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10569         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10570                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10571         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10572                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10573         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10574                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10575         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10576                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10577 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10578                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10579         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10580                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10581         { STATS_OFFSET32(no_buff_discard_hi),
10582                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10583         { STATS_OFFSET32(mac_filter_discard),
10584                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10585         { STATS_OFFSET32(xxoverflow_discard),
10586                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10587         { STATS_OFFSET32(brb_drop_hi),
10588                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10589         { STATS_OFFSET32(brb_truncate_hi),
10590                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10591         { STATS_OFFSET32(pause_frames_received_hi),
10592                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10593         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10594                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10595         { STATS_OFFSET32(nig_timer_max),
10596                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10597 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10598                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10599         { STATS_OFFSET32(rx_skb_alloc_failed),
10600                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10601         { STATS_OFFSET32(hw_csum_err),
10602                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10603
10604         { STATS_OFFSET32(total_bytes_transmitted_hi),
10605                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10606         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10607                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10608         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10609                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10610         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10611                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10612         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10613                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10614         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10615                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10616         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10617                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10618 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10619                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10620         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10621                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10622         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10623                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10624         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10625                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10626         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10627                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10628         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10629                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10630         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10631                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10632         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10633                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10634         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10635                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10636         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10637                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10638 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10639                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10640         { STATS_OFFSET32(pause_frames_sent_hi),
10641                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10642 };
10643
10644 #define IS_PORT_STAT(i) \
10645         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10646 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10647 #define IS_E1HMF_MODE_STAT(bp) \
10648                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
10649
10650 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10651 {
10652         struct bnx2x *bp = netdev_priv(dev);
10653         int i, num_stats;
10654
10655         switch (stringset) {
10656         case ETH_SS_STATS:
10657                 if (is_multi(bp)) {
10658                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10659                         if (!IS_E1HMF_MODE_STAT(bp))
10660                                 num_stats += BNX2X_NUM_STATS;
10661                 } else {
10662                         if (IS_E1HMF_MODE_STAT(bp)) {
10663                                 num_stats = 0;
10664                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10665                                         if (IS_FUNC_STAT(i))
10666                                                 num_stats++;
10667                         } else
10668                                 num_stats = BNX2X_NUM_STATS;
10669                 }
10670                 return num_stats;
10671
10672         case ETH_SS_TEST:
10673                 return BNX2X_NUM_TESTS;
10674
10675         default:
10676                 return -EINVAL;
10677         }
10678 }
10679
10680 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10681 {
10682         struct bnx2x *bp = netdev_priv(dev);
10683         int i, j, k;
10684
10685         switch (stringset) {
10686         case ETH_SS_STATS:
10687                 if (is_multi(bp)) {
10688                         k = 0;
10689                         for_each_queue(bp, i) {
10690                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10691                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10692                                                 bnx2x_q_stats_arr[j].string, i);
10693                                 k += BNX2X_NUM_Q_STATS;
10694                         }
10695                         if (IS_E1HMF_MODE_STAT(bp))
10696                                 break;
10697                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10698                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10699                                        bnx2x_stats_arr[j].string);
10700                 } else {
10701                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10702                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10703                                         continue;
10704                                 strcpy(buf + j*ETH_GSTRING_LEN,
10705                                        bnx2x_stats_arr[i].string);
10706                                 j++;
10707                         }
10708                 }
10709                 break;
10710
10711         case ETH_SS_TEST:
10712                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10713                 break;
10714         }
10715 }
10716
10717 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10718                                     struct ethtool_stats *stats, u64 *buf)
10719 {
10720         struct bnx2x *bp = netdev_priv(dev);
10721         u32 *hw_stats, *offset;
10722         int i, j, k;
10723
10724         if (is_multi(bp)) {
10725                 k = 0;
10726                 for_each_queue(bp, i) {
10727                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10728                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10729                                 if (bnx2x_q_stats_arr[j].size == 0) {
10730                                         /* skip this counter */
10731                                         buf[k + j] = 0;
10732                                         continue;
10733                                 }
10734                                 offset = (hw_stats +
10735                                           bnx2x_q_stats_arr[j].offset);
10736                                 if (bnx2x_q_stats_arr[j].size == 4) {
10737                                         /* 4-byte counter */
10738                                         buf[k + j] = (u64) *offset;
10739                                         continue;
10740                                 }
10741                                 /* 8-byte counter */
10742                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10743                         }
10744                         k += BNX2X_NUM_Q_STATS;
10745                 }
10746                 if (IS_E1HMF_MODE_STAT(bp))
10747                         return;
10748                 hw_stats = (u32 *)&bp->eth_stats;
10749                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10750                         if (bnx2x_stats_arr[j].size == 0) {
10751                                 /* skip this counter */
10752                                 buf[k + j] = 0;
10753                                 continue;
10754                         }
10755                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10756                         if (bnx2x_stats_arr[j].size == 4) {
10757                                 /* 4-byte counter */
10758                                 buf[k + j] = (u64) *offset;
10759                                 continue;
10760                         }
10761                         /* 8-byte counter */
10762                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10763                 }
10764         } else {
10765                 hw_stats = (u32 *)&bp->eth_stats;
10766                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10767                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10768                                 continue;
10769                         if (bnx2x_stats_arr[i].size == 0) {
10770                                 /* skip this counter */
10771                                 buf[j] = 0;
10772                                 j++;
10773                                 continue;
10774                         }
10775                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10776                         if (bnx2x_stats_arr[i].size == 4) {
10777                                 /* 4-byte counter */
10778                                 buf[j] = (u64) *offset;
10779                                 j++;
10780                                 continue;
10781                         }
10782                         /* 8-byte counter */
10783                         buf[j] = HILO_U64(*offset, *(offset + 1));
10784                         j++;
10785                 }
10786         }
10787 }
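/* Editor's note: the 8-byte counters above are stored as two adjacent
 * 32-bit words, most-significant word first, and recombined with
 * HILO_U64 (defined in bnx2x.h).  A minimal sketch of the composition:
 *
 *	u32 hi = *offset;			most significant 32 bits
 *	u32 lo = *(offset + 1);			least significant 32 bits
 *	u64 val = (((u64)hi) << 32) + lo;	== HILO_U64(hi, lo)
 */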
10788
10789 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10790 {
10791         struct bnx2x *bp = netdev_priv(dev);
10792         int i;
10793
10794         if (!netif_running(dev))
10795                 return 0;
10796
10797         if (!bp->port.pmf)
10798                 return 0;
10799
10800         if (data == 0)
10801                 data = 2;
10802
10803         for (i = 0; i < (data * 2); i++) {
10804                 if ((i % 2) == 0)
10805                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10806                                       SPEED_1000);
10807                 else
10808                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10809
10810                 msleep_interruptible(500);
10811                 if (signal_pending(current))
10812                         break;
10813         }
10814
10815         if (bp->link_vars.link_up)
10816                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10817                               bp->link_vars.line_speed);
10818
10819         return 0;
10820 }
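/* Editor's note: bnx2x_phys_id() backs the ethtool LED-blink request,
 * so it can be exercised from userspace with, e.g. (eth0 hypothetical):
 *
 *	ethtool -p eth0 5	blink the port LED for ~5 seconds
 *
 * A data argument of 0 falls back to the 2-second default above, and
 * each loop iteration toggles the LED every 500 ms.
 */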
10821
10822 static const struct ethtool_ops bnx2x_ethtool_ops = {
10823         .get_settings           = bnx2x_get_settings,
10824         .set_settings           = bnx2x_set_settings,
10825         .get_drvinfo            = bnx2x_get_drvinfo,
10826         .get_regs_len           = bnx2x_get_regs_len,
10827         .get_regs               = bnx2x_get_regs,
10828         .get_wol                = bnx2x_get_wol,
10829         .set_wol                = bnx2x_set_wol,
10830         .get_msglevel           = bnx2x_get_msglevel,
10831         .set_msglevel           = bnx2x_set_msglevel,
10832         .nway_reset             = bnx2x_nway_reset,
10833         .get_link               = bnx2x_get_link,
10834         .get_eeprom_len         = bnx2x_get_eeprom_len,
10835         .get_eeprom             = bnx2x_get_eeprom,
10836         .set_eeprom             = bnx2x_set_eeprom,
10837         .get_coalesce           = bnx2x_get_coalesce,
10838         .set_coalesce           = bnx2x_set_coalesce,
10839         .get_ringparam          = bnx2x_get_ringparam,
10840         .set_ringparam          = bnx2x_set_ringparam,
10841         .get_pauseparam         = bnx2x_get_pauseparam,
10842         .set_pauseparam         = bnx2x_set_pauseparam,
10843         .get_rx_csum            = bnx2x_get_rx_csum,
10844         .set_rx_csum            = bnx2x_set_rx_csum,
10845         .get_tx_csum            = ethtool_op_get_tx_csum,
10846         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10847         .set_flags              = bnx2x_set_flags,
10848         .get_flags              = ethtool_op_get_flags,
10849         .get_sg                 = ethtool_op_get_sg,
10850         .set_sg                 = ethtool_op_set_sg,
10851         .get_tso                = ethtool_op_get_tso,
10852         .set_tso                = bnx2x_set_tso,
10853         .self_test              = bnx2x_self_test,
10854         .get_sset_count         = bnx2x_get_sset_count,
10855         .get_strings            = bnx2x_get_strings,
10856         .phys_id                = bnx2x_phys_id,
10857         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10858 };
10859
10860 /* end of ethtool_ops */
10861
10862 /****************************************************************************
10863 * General service functions
10864 ****************************************************************************/
10865
10866 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10867 {
10868         u16 pmcsr;
10869
10870         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10871
10872         switch (state) {
10873         case PCI_D0:
10874                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10875                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10876                                        PCI_PM_CTRL_PME_STATUS));
10877
10878                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10879                         /* delay required during transition out of D3hot */
10880                         msleep(20);
10881                 break;
10882
10883         case PCI_D3hot:
10884                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10885                 pmcsr |= 3;
10886
10887                 if (bp->wol)
10888                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10889
10890                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10891                                       pmcsr);
10892
10893                 /* No more memory access after this point until
10894                  * device is brought back to D0.
10895                  */
10896                 break;
10897
10898         default:
10899                 return -EINVAL;
10900         }
10901         return 0;
10902 }
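/* Editor's note: the PowerState field is the low two bits of PMCSR
 * (PCI_PM_CTRL_STATE_MASK is 0x0003), so the literal in "pmcsr |= 3"
 * above selects D3hot.  An equivalent, more explicit form would be:
 *
 *	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 *	pmcsr |= PCI_D3hot;		PCI_D3hot == 3
 */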
10903
10904 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10905 {
10906         u16 rx_cons_sb;
10907
10908         /* Tell compiler that status block fields can change */
10909         barrier();
10910         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10911         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10912                 rx_cons_sb++;
10913         return (fp->rx_comp_cons != rx_cons_sb);
10914 }
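/* Editor's note: the last entry of every RCQ page holds a "next page"
 * pointer rather than a real completion, so a consumer index that lands
 * on a page boundary is bumped past it before the comparison.  With a
 * hypothetical 8-entry page (MAX_RCQ_DESC_CNT == 7):
 *
 *	rx_cons_sb == 7			points at the link entry
 *	(7 & 7) == 7  ->  rx_cons_sb++	now 8, the first real entry
 *					of the next page
 */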
10915
10916 /*
10917  * net_device service functions
10918  */
10919
10920 static int bnx2x_poll(struct napi_struct *napi, int budget)
10921 {
10922         int work_done = 0;
10923         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10924                                                  napi);
10925         struct bnx2x *bp = fp->bp;
10926
10927         while (1) {
10928 #ifdef BNX2X_STOP_ON_ERROR
10929                 if (unlikely(bp->panic)) {
10930                         napi_complete(napi);
10931                         return 0;
10932                 }
10933 #endif
10934
10935                 if (bnx2x_has_tx_work(fp))
10936                         bnx2x_tx_int(fp);
10937
10938                 if (bnx2x_has_rx_work(fp)) {
10939                         work_done += bnx2x_rx_int(fp, budget - work_done);
10940
10941                         /* must not complete if we consumed full budget */
10942                         if (work_done >= budget)
10943                                 break;
10944                 }
10945
10946                 /* Fall out from the NAPI loop if needed */
10947                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10948                         bnx2x_update_fpsb_idx(fp);
10949                         /* bnx2x_has_rx_work() reads the status block, so we
10950                          * must ensure the status block indices have actually
10951                          * been read (bnx2x_update_fpsb_idx) before this
10952                          * re-check (bnx2x_has_rx_work); otherwise we could
10953                          * write a stale value to the IGU.  If a DMA happened
10954                          * right after bnx2x_has_rx_work and there were no
10955                          * rmb, the read in bnx2x_update_fpsb_idx might be
10956                          * postponed until just before bnx2x_ack_sb, and no
10957                          * further interrupt would arrive until the next
10958                          * status block update, even though work is pending.
10959                          */
10960                         rmb();
10961
10962                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10963                                 napi_complete(napi);
10964                                 /* Re-enable interrupts */
10965                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10966                                              le16_to_cpu(fp->fp_c_idx),
10967                                              IGU_INT_NOP, 1);
10968                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10969                                              le16_to_cpu(fp->fp_u_idx),
10970                                              IGU_INT_ENABLE, 1);
10971                                 break;
10972                         }
10973                 }
10974         }
10975
10976         return work_done;
10977 }
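/* Editor's note: the exit path of bnx2x_poll() above is the standard
 * NAPI race-avoidance pattern, in outline:
 *
 *	if (no rx/tx work) {
 *		bnx2x_update_fpsb_idx(fp);	refresh local SB copy
 *		rmb();				order SB read vs. re-check
 *		if (still no work) {
 *			napi_complete(napi);
 *			bnx2x_ack_sb(...);	re-enable the interrupt
 *		}
 *	}
 */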
10978
10979
10980 /* We split the first BD into a headers BD and a data BD
10981  * to ease the pain of our fellow microcode engineers;
10982  * we use one DMA mapping for both BDs.
10983  * So far this has only been observed to happen
10984  * in Other Operating Systems(TM).
10985  */
10986 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10987                                    struct bnx2x_fastpath *fp,
10988                                    struct sw_tx_bd *tx_buf,
10989                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10990                                    u16 bd_prod, int nbd)
10991 {
10992         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10993         struct eth_tx_bd *d_tx_bd;
10994         dma_addr_t mapping;
10995         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10996
10997         /* first fix first BD */
10998         h_tx_bd->nbd = cpu_to_le16(nbd);
10999         h_tx_bd->nbytes = cpu_to_le16(hlen);
11000
11001         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11002            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11003            h_tx_bd->addr_lo, h_tx_bd->nbd);
11004
11005         /* now get a new data BD
11006          * (after the pbd) and fill it */
11007         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11008         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11009
11010         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11011                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11012
11013         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11014         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11015         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11016
11017         /* this marks the BD as one that has no individual mapping */
11018         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11019
11020         DP(NETIF_MSG_TX_QUEUED,
11021            "TSO split data size is %d (%x:%x)\n",
11022            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11023
11024         /* update tx_bd */
11025         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11026
11027         return bd_prod;
11028 }
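/* Editor's note: a worked example of the split above, assuming a
 * hypothetical 1514-byte linear buffer with hlen == 66 bytes of headers:
 *
 *	header BD:  addr = mapping,       nbytes = 66
 *	data BD:    addr = mapping + 66,  nbytes = 1514 - 66 = 1448
 *
 * Both BDs reference the same DMA mapping, which is why the buffer is
 * flagged BNX2X_TSO_SPLIT_BD and not unmapped twice on completion.
 */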
11029
11030 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11031 {
11032         if (fix > 0)
11033                 csum = (u16) ~csum_fold(csum_sub(csum,
11034                                 csum_partial(t_header - fix, fix, 0)));
11035
11036         else if (fix < 0)
11037                 csum = (u16) ~csum_fold(csum_add(csum,
11038                                 csum_partial(t_header, -fix, 0)));
11039
11040         return swab16(csum);
11041 }
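/* Editor's note: bnx2x_csum_fix() works in one's-complement arithmetic
 * using the standard csum_partial()/csum_sub()/csum_add()/csum_fold()
 * helpers: a positive fix removes the partial sum of the fix bytes
 * preceding t_header, a negative fix adds the partial sum of the first
 * -fix bytes at t_header, and the result is byte-swapped into the
 * format the parsing BD expects.  For a hypothetical fix == 2:
 *
 *	csum = (u16) ~csum_fold(csum_sub(csum,
 *			csum_partial(t_header - 2, 2, 0)));
 */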
11042
11043 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11044 {
11045         u32 rc;
11046
11047         if (skb->ip_summed != CHECKSUM_PARTIAL)
11048                 rc = XMIT_PLAIN;
11049
11050         else {
11051                 if (skb->protocol == htons(ETH_P_IPV6)) {
11052                         rc = XMIT_CSUM_V6;
11053                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11054                                 rc |= XMIT_CSUM_TCP;
11055
11056                 } else {
11057                         rc = XMIT_CSUM_V4;
11058                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11059                                 rc |= XMIT_CSUM_TCP;
11060                 }
11061         }
11062
11063         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11064                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11065
11066         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11067                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11068
11069         return rc;
11070 }
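/* Editor's note: the returned mask combines checksum and GSO bits;
 * e.g. a CHECKSUM_PARTIAL TCPv4 frame being segmented yields
 *
 *	XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 *
 * while a frame that needs no HW checksum is simply XMIT_PLAIN.
 */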
11071
11072 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11073 /* Check if the packet requires linearization (i.e. is too fragmented).
11074    There is no need to check fragmentation if the page size > 8K (there
11075    can be no violation of the FW restrictions in that case). */
11076 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11077                              u32 xmit_type)
11078 {
11079         int to_copy = 0;
11080         int hlen = 0;
11081         int first_bd_sz = 0;
11082
11083         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11084         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11085
11086                 if (xmit_type & XMIT_GSO) {
11087                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11088                         /* Check if LSO packet needs to be copied:
11089                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11090                         int wnd_size = MAX_FETCH_BD - 3;
11091                         /* Number of windows to check */
11092                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11093                         int wnd_idx = 0;
11094                         int frag_idx = 0;
11095                         u32 wnd_sum = 0;
11096
11097                         /* Headers length */
11098                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11099                                 tcp_hdrlen(skb);
11100
11101                         /* Amount of data (w/o headers) on the linear part of the SKB */
11102                         first_bd_sz = skb_headlen(skb) - hlen;
11103
11104                         wnd_sum  = first_bd_sz;
11105
11106                         /* Calculate the first sum - it's special */
11107                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11108                                 wnd_sum +=
11109                                         skb_shinfo(skb)->frags[frag_idx].size;
11110
11111                         /* If there was data in the linear part of the skb - check it */
11112                         if (first_bd_sz > 0) {
11113                                 if (unlikely(wnd_sum < lso_mss)) {
11114                                         to_copy = 1;
11115                                         goto exit_lbl;
11116                                 }
11117
11118                                 wnd_sum -= first_bd_sz;
11119                         }
11120
11121                         /* Others are easier: run through the frag list and
11122                            check all windows */
11123                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11124                                 wnd_sum +=
11125                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11126
11127                                 if (unlikely(wnd_sum < lso_mss)) {
11128                                         to_copy = 1;
11129                                         break;
11130                                 }
11131                                 wnd_sum -=
11132                                         skb_shinfo(skb)->frags[wnd_idx].size;
11133                         }
11134                 } else {
11135                         /* a non-LSO packet that is too fragmented
11136                            must always be linearized */
11137                         to_copy = 1;
11138                 }
11139         }
11140
11141 exit_lbl:
11142         if (unlikely(to_copy))
11143                 DP(NETIF_MSG_TX_QUEUED,
11144                    "Linearization IS REQUIRED for %s packet. "
11145                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11146                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11147                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11148
11149         return to_copy;
11150 }
11151 #endif
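/* Editor's note: the check above enforces the FW rule that every window
 * of wnd_size consecutive BDs must carry at least one full MSS.  A
 * hypothetical failing case with wnd_size == 3, lso_mss == 1460 and no
 * data in the linear part (first_bd_sz == 0):
 *
 *	frag sizes:   500, 400, 300, ...
 *	first window: 500 + 400 + 300 = 1200 < 1460  ->  linearize
 */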
11152
11153 /* called with netif_tx_lock
11154  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11155  * netif_wake_queue()
11156  */
11157 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11158 {
11159         struct bnx2x *bp = netdev_priv(dev);
11160         struct bnx2x_fastpath *fp;
11161         struct netdev_queue *txq;
11162         struct sw_tx_bd *tx_buf;
11163         struct eth_tx_start_bd *tx_start_bd;
11164         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11165         struct eth_tx_parse_bd *pbd = NULL;
11166         u16 pkt_prod, bd_prod;
11167         int nbd, fp_index;
11168         dma_addr_t mapping;
11169         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11170         int i;
11171         u8 hlen = 0;
11172         __le16 pkt_size = 0;
11173
11174 #ifdef BNX2X_STOP_ON_ERROR
11175         if (unlikely(bp->panic))
11176                 return NETDEV_TX_BUSY;
11177 #endif
11178
11179         fp_index = skb_get_queue_mapping(skb);
11180         txq = netdev_get_tx_queue(dev, fp_index);
11181
11182         fp = &bp->fp[fp_index];
11183
11184         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11185                 fp->eth_q_stats.driver_xoff++;
11186                 netif_tx_stop_queue(txq);
11187                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11188                 return NETDEV_TX_BUSY;
11189         }
11190
11191         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11192            "  gso type %x  xmit_type %x\n",
11193            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11194            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11195
11196 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11197         /* First, check if we need to linearize the skb (due to FW
11198            restrictions). There is no need to check fragmentation if the
11199            page size > 8K (there can be no violation of FW restrictions) */
11200         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11201                 /* Statistics of linearization */
11202                 bp->lin_cnt++;
11203                 if (skb_linearize(skb) != 0) {
11204                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11205                            "silently dropping this SKB\n");
11206                         dev_kfree_skb_any(skb);
11207                         return NETDEV_TX_OK;
11208                 }
11209         }
11210 #endif
11211
11212         /*
11213          * Please read carefully. First we use one BD which we mark as
11214          * start, then we have a parsing info BD (used for TSO or xsum),
11215          * and only then we have the rest of the TSO BDs.
11216          * (don't forget to mark the last one as last,
11217          * and to unmap only AFTER you write to the BD ...)
11218          * And above all, all pbd sizes are in words - NOT DWORDS!
11219          */
11220
11221         pkt_prod = fp->tx_pkt_prod++;
11222         bd_prod = TX_BD(fp->tx_bd_prod);
11223
11224         /* get a tx_buf and first BD */
11225         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11226         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11227
11228         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11229         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11230                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11231         /* header nbd */
11232         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11233
11234         /* remember the first BD of the packet */
11235         tx_buf->first_bd = fp->tx_bd_prod;
11236         tx_buf->skb = skb;
11237         tx_buf->flags = 0;
11238
11239         DP(NETIF_MSG_TX_QUEUED,
11240            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11241            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11242
11243 #ifdef BCM_VLAN
11244         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11245             (bp->flags & HW_VLAN_TX_FLAG)) {
11246                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11247                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11248         } else
11249 #endif
11250                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11251
11252         /* turn on parsing and get a BD */
11253         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11254         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11255
11256         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11257
11258         if (xmit_type & XMIT_CSUM) {
11259                 hlen = (skb_network_header(skb) - skb->data) / 2;
11260
11261                 /* for now NS flag is not used in Linux */
11262                 pbd->global_data =
11263                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11264                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11265
11266                 pbd->ip_hlen = (skb_transport_header(skb) -
11267                                 skb_network_header(skb)) / 2;
11268
11269                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11270
11271                 pbd->total_hlen = cpu_to_le16(hlen);
11272                 hlen = hlen*2;
11273
11274                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11275
11276                 if (xmit_type & XMIT_CSUM_V4)
11277                         tx_start_bd->bd_flags.as_bitfield |=
11278                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11279                 else
11280                         tx_start_bd->bd_flags.as_bitfield |=
11281                                                 ETH_TX_BD_FLAGS_IPV6;
11282
11283                 if (xmit_type & XMIT_CSUM_TCP) {
11284                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11285
11286                 } else {
11287                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11288
11289                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11290
11291                         DP(NETIF_MSG_TX_QUEUED,
11292                            "hlen %d  fix %d  csum before fix %x\n",
11293                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11294
11295                         /* HW bug: fixup the CSUM */
11296                         pbd->tcp_pseudo_csum =
11297                                 bnx2x_csum_fix(skb_transport_header(skb),
11298                                                SKB_CS(skb), fix);
11299
11300                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11301                            pbd->tcp_pseudo_csum);
11302                 }
11303         }
11304
11305         mapping = pci_map_single(bp->pdev, skb->data,
11306                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11307
11308         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11309         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11310         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11311         tx_start_bd->nbd = cpu_to_le16(nbd);
11312         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11313         pkt_size = tx_start_bd->nbytes;
11314
11315         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11316            "  nbytes %d  flags %x  vlan %x\n",
11317            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11318            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11319            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11320
11321         if (xmit_type & XMIT_GSO) {
11322
11323                 DP(NETIF_MSG_TX_QUEUED,
11324                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11325                    skb->len, hlen, skb_headlen(skb),
11326                    skb_shinfo(skb)->gso_size);
11327
11328                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11329
11330                 if (unlikely(skb_headlen(skb) > hlen))
11331                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11332                                                  hlen, bd_prod, ++nbd);
11333
11334                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11335                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11336                 pbd->tcp_flags = pbd_tcp_flags(skb);
11337
11338                 if (xmit_type & XMIT_GSO_V4) {
11339                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11340                         pbd->tcp_pseudo_csum =
11341                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11342                                                           ip_hdr(skb)->daddr,
11343                                                           0, IPPROTO_TCP, 0));
11344
11345                 } else
11346                         pbd->tcp_pseudo_csum =
11347                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11348                                                         &ipv6_hdr(skb)->daddr,
11349                                                         0, IPPROTO_TCP, 0));
11350
11351                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11352         }
11353         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11354
11355         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11356                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11357
11358                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11359                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11360                 if (total_pkt_bd == NULL)
11361                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11362
11363                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11364                                        frag->size, PCI_DMA_TODEVICE);
11365
11366                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11367                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11368                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11369                 le16_add_cpu(&pkt_size, frag->size);
11370
11371                 DP(NETIF_MSG_TX_QUEUED,
11372                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11373                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11374                    le16_to_cpu(tx_data_bd->nbytes));
11375         }
11376
11377         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11378
11379         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11380
11381         /* now send a tx doorbell, counting the next (next-page) BD
11382          * if the packet contains or ends with it
11383          */
11384         if (TX_BD_POFF(bd_prod) < nbd)
11385                 nbd++;
11386
11387         if (total_pkt_bd != NULL)
11388                 total_pkt_bd->total_pkt_bytes = pkt_size;
11389
11390         if (pbd)
11391                 DP(NETIF_MSG_TX_QUEUED,
11392                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11393                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11394                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11395                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11396                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11397
11398         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11399
11400         /*
11401          * Make sure that the BD data is updated before updating the producer
11402          * since FW might read the BD right after the producer is updated.
11403          * This is only applicable for weak-ordered memory model archs such
11404          * as IA-64. The following barrier is also mandatory since the FW
11405          * assumes packets must have BDs.
11406          */
11407         wmb();
11408
11409         fp->tx_db.data.prod += nbd;
11410         barrier();
11411         DOORBELL(bp, fp->index, fp->tx_db.raw);
11412
11413         mmiowb();
11414
11415         fp->tx_bd_prod += nbd;
11416
11417         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11418                 netif_tx_stop_queue(txq);
11419                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11420                  * if we put Tx into XOFF state. */
11421                 smp_mb();
11422                 fp->eth_q_stats.driver_xoff++;
11423                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11424                         netif_tx_wake_queue(txq);
11425         }
11426         fp->tx_pkt++;
11427
11428         return NETDEV_TX_OK;
11429 }
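/* Editor's note: the tail of bnx2x_start_xmit() above follows a fixed
 * ordering protocol so the FW never observes a producer value ahead of
 * the BDs it covers:
 *
 *	write the BDs
 *	wmb()			BDs globally visible before the producer
 *	fp->tx_db.data.prod += nbd
 *	DOORBELL(...)		tell the chip
 *	mmiowb()		order the MMIO write on weakly-ordered
 *				platforms before the tx lock is dropped
 */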
11430
11431 /* called with rtnl_lock */
11432 static int bnx2x_open(struct net_device *dev)
11433 {
11434         struct bnx2x *bp = netdev_priv(dev);
11435
11436         netif_carrier_off(dev);
11437
11438         bnx2x_set_power_state(bp, PCI_D0);
11439
11440         return bnx2x_nic_load(bp, LOAD_OPEN);
11441 }
11442
11443 /* called with rtnl_lock */
11444 static int bnx2x_close(struct net_device *dev)
11445 {
11446         struct bnx2x *bp = netdev_priv(dev);
11447
11448         /* Unload the driver, release IRQs */
11449         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11450         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11451                 if (!CHIP_REV_IS_SLOW(bp))
11452                         bnx2x_set_power_state(bp, PCI_D3hot);
11453
11454         return 0;
11455 }
11456
11457 /* called with netif_tx_lock from dev_mcast.c */
11458 static void bnx2x_set_rx_mode(struct net_device *dev)
11459 {
11460         struct bnx2x *bp = netdev_priv(dev);
11461         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11462         int port = BP_PORT(bp);
11463
11464         if (bp->state != BNX2X_STATE_OPEN) {
11465                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11466                 return;
11467         }
11468
11469         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11470
11471         if (dev->flags & IFF_PROMISC)
11472                 rx_mode = BNX2X_RX_MODE_PROMISC;
11473
11474         else if ((dev->flags & IFF_ALLMULTI) ||
11475                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11476                   CHIP_IS_E1(bp)))
11477                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11478
11479         else { /* some multicasts */
11480                 if (CHIP_IS_E1(bp)) {
11481                         int i, old, offset;
11482                         struct dev_mc_list *mclist;
11483                         struct mac_configuration_cmd *config =
11484                                                 bnx2x_sp(bp, mcast_config);
11485
11486                         for (i = 0, mclist = dev->mc_list;
11487                              mclist && (i < netdev_mc_count(dev));
11488                              i++, mclist = mclist->next) {
11489
11490                                 config->config_table[i].
11491                                         cam_entry.msb_mac_addr =
11492                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11493                                 config->config_table[i].
11494                                         cam_entry.middle_mac_addr =
11495                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11496                                 config->config_table[i].
11497                                         cam_entry.lsb_mac_addr =
11498                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11499                                 config->config_table[i].cam_entry.flags =
11500                                                         cpu_to_le16(port);
11501                                 config->config_table[i].
11502                                         target_table_entry.flags = 0;
11503                                 config->config_table[i].target_table_entry.
11504                                         clients_bit_vector =
11505                                                 cpu_to_le32(1 << BP_L_ID(bp));
11506                                 config->config_table[i].
11507                                         target_table_entry.vlan_id = 0;
11508
11509                                 DP(NETIF_MSG_IFUP,
11510                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11511                                    config->config_table[i].
11512                                                 cam_entry.msb_mac_addr,
11513                                    config->config_table[i].
11514                                                 cam_entry.middle_mac_addr,
11515                                    config->config_table[i].
11516                                                 cam_entry.lsb_mac_addr);
11517                         }
11518                         old = config->hdr.length;
11519                         if (old > i) {
11520                                 for (; i < old; i++) {
11521                                         if (CAM_IS_INVALID(config->
11522                                                            config_table[i])) {
11523                                                 /* already invalidated */
11524                                                 break;
11525                                         }
11526                                         /* invalidate */
11527                                         CAM_INVALIDATE(config->
11528                                                        config_table[i]);
11529                                 }
11530                         }
11531
11532                         if (CHIP_REV_IS_SLOW(bp))
11533                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11534                         else
11535                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11536
11537                         config->hdr.length = i;
11538                         config->hdr.offset = offset;
11539                         config->hdr.client_id = bp->fp->cl_id;
11540                         config->hdr.reserved1 = 0;
11541
11542                         bp->set_mac_pending++;
11543                         smp_wmb();
11544
11545                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11546                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11547                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11548                                       0);
11549                 } else { /* E1H */
11550                         /* Accept one or more multicasts */
11551                         struct dev_mc_list *mclist;
11552                         u32 mc_filter[MC_HASH_SIZE];
11553                         u32 crc, bit, regidx;
11554                         int i;
11555
11556                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11557
11558                         for (i = 0, mclist = dev->mc_list;
11559                              mclist && (i < netdev_mc_count(dev));
11560                              i++, mclist = mclist->next) {
11561
11562                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11563                                    mclist->dmi_addr);
11564
11565                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11566                                 bit = (crc >> 24) & 0xff;
11567                                 regidx = bit >> 5;
11568                                 bit &= 0x1f;
11569                                 mc_filter[regidx] |= (1 << bit);
11570                         }
11571
11572                         for (i = 0; i < MC_HASH_SIZE; i++)
11573                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11574                                        mc_filter[i]);
11575                 }
11576         }
11577
11578         bp->rx_mode = rx_mode;
11579         bnx2x_set_storm_rx_mode(bp);
11580 }
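/* Editor's note: on E1H the multicast filter is a 256-bit hash table
 * spread over MC_HASH_SIZE 32-bit registers; the bucket is derived from
 * the top byte of the little-endian CRC32C of the MAC address.  For a
 * hypothetical crc == 0xAB123456:
 *
 *	bit    = (0xAB123456 >> 24) & 0xff = 0xAB = 171
 *	regidx = 171 >> 5 = 5		sixth 32-bit register
 *	bit    = 171 & 0x1f = 11	bit 11 within that register
 */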
11581
11582 /* called with rtnl_lock */
11583 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11584 {
11585         struct sockaddr *addr = p;
11586         struct bnx2x *bp = netdev_priv(dev);
11587
11588         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11589                 return -EINVAL;
11590
11591         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11592         if (netif_running(dev)) {
11593                 if (CHIP_IS_E1(bp))
11594                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11595                 else
11596                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11597         }
11598
11599         return 0;
11600 }
11601
11602 /* called with rtnl_lock */
11603 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11604                            int devad, u16 addr)
11605 {
11606         struct bnx2x *bp = netdev_priv(netdev);
11607         u16 value;
11608         int rc;
11609         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11610
11611         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11612            prtad, devad, addr);
11613
11614         if (prtad != bp->mdio.prtad) {
11615                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11616                    prtad, bp->mdio.prtad);
11617                 return -EINVAL;
11618         }
11619
11620         /* The HW expects different devad if CL22 is used */
11621         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11622
11623         bnx2x_acquire_phy_lock(bp);
11624         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11625                              devad, addr, &value);
11626         bnx2x_release_phy_lock(bp);
11627         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11628
11629         if (!rc)
11630                 rc = value;
11631         return rc;
11632 }
11633
11634 /* called with rtnl_lock */
11635 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11636                             u16 addr, u16 value)
11637 {
11638         struct bnx2x *bp = netdev_priv(netdev);
11639         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11640         int rc;
11641
11642         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11643                            " value 0x%x\n", prtad, devad, addr, value);
11644
11645         if (prtad != bp->mdio.prtad) {
11646                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11647                    prtad, bp->mdio.prtad);
11648                 return -EINVAL;
11649         }
11650
11651         /* The HW expects different devad if CL22 is used */
11652         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11653
11654         bnx2x_acquire_phy_lock(bp);
11655         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11656                               devad, addr, value);
11657         bnx2x_release_phy_lock(bp);
11658         return rc;
11659 }
11660
11661 /* called with rtnl_lock */
11662 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11663 {
11664         struct bnx2x *bp = netdev_priv(dev);
11665         struct mii_ioctl_data *mdio = if_mii(ifr);
11666
11667         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11668            mdio->phy_id, mdio->reg_num, mdio->val_in);
11669
11670         if (!netif_running(dev))
11671                 return -EAGAIN;
11672
11673         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11674 }
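/* Editor's note: mdio_mii_ioctl() services the standard SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG requests, so PHY registers are reachable
 * from userspace, e.g. (eth0 hypothetical):
 *
 *	mii-tool -v eth0	read the basic mode/status registers
 */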
11675
11676 /* called with rtnl_lock */
11677 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11678 {
11679         struct bnx2x *bp = netdev_priv(dev);
11680         int rc = 0;
11681
11682         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11683             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11684                 return -EINVAL;
11685
11686         /* This does not race with packet allocation
11687          * because the actual alloc size is
11688          * only updated as part of load
11689          */
11690         dev->mtu = new_mtu;
11691
11692         if (netif_running(dev)) {
11693                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11694                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11695         }
11696
11697         return rc;
11698 }
11699
11700 static void bnx2x_tx_timeout(struct net_device *dev)
11701 {
11702         struct bnx2x *bp = netdev_priv(dev);
11703
11704 #ifdef BNX2X_STOP_ON_ERROR
11705         if (!bp->panic)
11706                 bnx2x_panic();
11707 #endif
11708         /* This allows the netif to be shutdown gracefully before resetting */
11709         schedule_work(&bp->reset_task);
11710 }
11711
11712 #ifdef BCM_VLAN
11713 /* called with rtnl_lock */
11714 static void bnx2x_vlan_rx_register(struct net_device *dev,
11715                                    struct vlan_group *vlgrp)
11716 {
11717         struct bnx2x *bp = netdev_priv(dev);
11718
11719         bp->vlgrp = vlgrp;
11720
11721         /* Set flags according to the required capabilities */
11722         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11723
11724         if (dev->features & NETIF_F_HW_VLAN_TX)
11725                 bp->flags |= HW_VLAN_TX_FLAG;
11726
11727         if (dev->features & NETIF_F_HW_VLAN_RX)
11728                 bp->flags |= HW_VLAN_RX_FLAG;
11729
11730         if (netif_running(dev))
11731                 bnx2x_set_client_config(bp);
11732 }
11733
11734 #endif
11735
11736 #ifdef CONFIG_NET_POLL_CONTROLLER
11737 static void poll_bnx2x(struct net_device *dev)
11738 {
11739         struct bnx2x *bp = netdev_priv(dev);
11740
11741         disable_irq(bp->pdev->irq);
11742         bnx2x_interrupt(bp->pdev->irq, dev);
11743         enable_irq(bp->pdev->irq);
11744 }
11745 #endif
11746
11747 static const struct net_device_ops bnx2x_netdev_ops = {
11748         .ndo_open               = bnx2x_open,
11749         .ndo_stop               = bnx2x_close,
11750         .ndo_start_xmit         = bnx2x_start_xmit,
11751         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11752         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11753         .ndo_validate_addr      = eth_validate_addr,
11754         .ndo_do_ioctl           = bnx2x_ioctl,
11755         .ndo_change_mtu         = bnx2x_change_mtu,
11756         .ndo_tx_timeout         = bnx2x_tx_timeout,
11757 #ifdef BCM_VLAN
11758         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11759 #endif
11760 #ifdef CONFIG_NET_POLL_CONTROLLER
11761         .ndo_poll_controller    = poll_bnx2x,
11762 #endif
11763 };
11764
11765 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11766                                     struct net_device *dev)
11767 {
11768         struct bnx2x *bp;
11769         int rc;
11770
11771         SET_NETDEV_DEV(dev, &pdev->dev);
11772         bp = netdev_priv(dev);
11773
11774         bp->dev = dev;
11775         bp->pdev = pdev;
11776         bp->flags = 0;
11777         bp->func = PCI_FUNC(pdev->devfn);
11778
11779         rc = pci_enable_device(pdev);
11780         if (rc) {
11781                 pr_err("Cannot enable PCI device, aborting\n");
11782                 goto err_out;
11783         }
11784
11785         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11786                 pr_err("Cannot find PCI device base address, aborting\n");
11787                 rc = -ENODEV;
11788                 goto err_out_disable;
11789         }
11790
11791         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11792                 pr_err("Cannot find second PCI device base address, aborting\n");
11793                 rc = -ENODEV;
11794                 goto err_out_disable;
11795         }
11796
11797         if (atomic_read(&pdev->enable_cnt) == 1) {
11798                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11799                 if (rc) {
11800                         pr_err("Cannot obtain PCI resources, aborting\n");
11801                         goto err_out_disable;
11802                 }
11803
11804                 pci_set_master(pdev);
11805                 pci_save_state(pdev);
11806         }
11807
11808         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11809         if (bp->pm_cap == 0) {
11810                 pr_err("Cannot find power management capability, aborting\n");
11811                 rc = -EIO;
11812                 goto err_out_release;
11813         }
11814
11815         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11816         if (bp->pcie_cap == 0) {
11817                 pr_err("Cannot find PCI Express capability, aborting\n");
11818                 rc = -EIO;
11819                 goto err_out_release;
11820         }
11821
11822         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11823                 bp->flags |= USING_DAC_FLAG;
11824                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11825                         pr_err("pci_set_consistent_dma_mask failed, aborting\n");
11826                         rc = -EIO;
11827                         goto err_out_release;
11828                 }
11829
11830         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11831                 pr_err("System does not support DMA, aborting\n");
11832                 rc = -EIO;
11833                 goto err_out_release;
11834         }
11835
11836         dev->mem_start = pci_resource_start(pdev, 0);
11837         dev->base_addr = dev->mem_start;
11838         dev->mem_end = pci_resource_end(pdev, 0);
11839
11840         dev->irq = pdev->irq;
11841
11842         bp->regview = pci_ioremap_bar(pdev, 0);
11843         if (!bp->regview) {
11844                 pr_err("Cannot map register space, aborting\n");
11845                 rc = -ENOMEM;
11846                 goto err_out_release;
11847         }
11848
11849         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11850                                         min_t(u64, BNX2X_DB_SIZE,
11851                                               pci_resource_len(pdev, 2)));
11852         if (!bp->doorbells) {
11853                 pr_err("Cannot map doorbell space, aborting\n");
11854                 rc = -ENOMEM;
11855                 goto err_out_unmap;
11856         }
11857
11858         bnx2x_set_power_state(bp, PCI_D0);
11859
11860         /* clean indirect addresses */
11861         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11862                                PCICFG_VENDOR_ID_OFFSET);
11863         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11864         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11865         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11866         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11867
11868         dev->watchdog_timeo = TX_TIMEOUT;
11869
11870         dev->netdev_ops = &bnx2x_netdev_ops;
11871         dev->ethtool_ops = &bnx2x_ethtool_ops;
11872         dev->features |= NETIF_F_SG;
11873         dev->features |= NETIF_F_HW_CSUM;
11874         if (bp->flags & USING_DAC_FLAG)
11875                 dev->features |= NETIF_F_HIGHDMA;
11876         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11877         dev->features |= NETIF_F_TSO6;
11878 #ifdef BCM_VLAN
11879         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11880         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11881
11882         dev->vlan_features |= NETIF_F_SG;
11883         dev->vlan_features |= NETIF_F_HW_CSUM;
11884         if (bp->flags & USING_DAC_FLAG)
11885                 dev->vlan_features |= NETIF_F_HIGHDMA;
11886         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11887         dev->vlan_features |= NETIF_F_TSO6;
11888 #endif
11889
11890         /* get_port_hwinfo() will set prtad and mmds properly */
11891         bp->mdio.prtad = MDIO_PRTAD_NONE;
11892         bp->mdio.mmds = 0;
11893         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11894         bp->mdio.dev = dev;
11895         bp->mdio.mdio_read = bnx2x_mdio_read;
11896         bp->mdio.mdio_write = bnx2x_mdio_write;
11897
11898         return 0;
11899
11900 err_out_unmap:
11901         if (bp->regview) {
11902                 iounmap(bp->regview);
11903                 bp->regview = NULL;
11904         }
11905         if (bp->doorbells) {
11906                 iounmap(bp->doorbells);
11907                 bp->doorbells = NULL;
11908         }
11909
11910 err_out_release:
11911         if (atomic_read(&pdev->enable_cnt) == 1)
11912                 pci_release_regions(pdev);
11913
11914 err_out_disable:
11915         pci_disable_device(pdev);
11916         pci_set_drvdata(pdev, NULL);
11917
11918 err_out:
11919         return rc;
11920 }
11921
11922 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11923                                                  int *width, int *speed)
11924 {
11925         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11926
11927         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11928
11929         /* returned value: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
11930         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11931 }
11932
11933 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11934 {
11935         const struct firmware *firmware = bp->firmware;
11936         struct bnx2x_fw_file_hdr *fw_hdr;
11937         struct bnx2x_fw_file_section *sections;
11938         u32 offset, len, num_ops;
11939         u16 *ops_offsets;
11940         int i;
11941         const u8 *fw_ver;
11942
11943         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11944                 return -EINVAL;
11945
11946         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11947         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11948
11949         /* Make sure none of the offsets and sizes make us read beyond
11950          * the end of the firmware data */
11951         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11952                 offset = be32_to_cpu(sections[i].offset);
11953                 len = be32_to_cpu(sections[i].len);
11954                 if (offset + len > firmware->size) {
11955                         pr_err("Section %d length is out of bounds\n", i);
11956                         return -EINVAL;
11957                 }
11958         }
11959
11960         /* Likewise for the init_ops offsets */
11961         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11962         ops_offsets = (u16 *)(firmware->data + offset);
11963         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11964
11965         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11966                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11967                         pr_err("Section offset %d is out of bounds\n", i);
11968                         return -EINVAL;
11969                 }
11970         }
11971
11972         /* Check FW version */
11973         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11974         fw_ver = firmware->data + offset;
11975         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11976             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11977             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11978             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11979                 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11980                        fw_ver[0], fw_ver[1], fw_ver[2],
11981                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11982                        BCM_5710_FW_MINOR_VERSION,
11983                        BCM_5710_FW_REVISION_VERSION,
11984                        BCM_5710_FW_ENGINEERING_VERSION);
11985                 return -EINVAL;
11986         }
11987
11988         return 0;
11989 }
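/* Editor's note: the validation above treats the start of the file as
 * an array of {offset, len} section descriptors (struct
 * bnx2x_fw_file_section), big-endian on disk, so each bounds test is
 * essentially:
 *
 *	be32_to_cpu(sec->offset) + be32_to_cpu(sec->len) <= firmware->size
 *
 * followed by a range check of every init_ops offset and a strict match
 * of the file's FW version against the driver's compiled-in one.
 */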
11990
11991 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11992 {
11993         const __be32 *source = (const __be32 *)_source;
11994         u32 *target = (u32 *)_target;
11995         u32 i;
11996
11997         for (i = 0; i < n/4; i++)
11998                 target[i] = be32_to_cpu(source[i]);
11999 }
12000
12001 /*
12002    Ops array is stored in the following format:
12003    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12004  */
12005 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12006 {
12007         const __be32 *source = (const __be32 *)_source;
12008         struct raw_op *target = (struct raw_op *)_target;
12009         u32 i, j, tmp;
12010
12011         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12012                 tmp = be32_to_cpu(source[j]);
12013                 target[i].op = (tmp >> 24) & 0xff;
12014                 target[i].offset =  tmp & 0xffffff;
12015                 target[i].raw_data = be32_to_cpu(source[j+1]);
12016         }
12017 }
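/* Editor's note: a worked example of the unpacking above - the
 * big-endian dword pair 0x02001234 0x00000005 decodes as
 *
 *	op       = (0x02001234 >> 24) & 0xff = 0x02
 *	offset   =  0x02001234 & 0xffffff    = 0x001234
 *	raw_data =  0x00000005
 */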
12018
12019 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12020 {
12021         const __be16 *source = (const __be16 *)_source;
12022         u16 *target = (u16 *)_target;
12023         u32 i;
12024
12025         for (i = 0; i < n/2; i++)
12026                 target[i] = be16_to_cpu(source[i]);
12027 }
12028
12029 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
12030 do {                                                                    \
12031         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
12032         bp->arr = kmalloc(len, GFP_KERNEL);                             \
12033         if (!bp->arr) {                                                 \
12034                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
12035                 goto lbl;                                               \
12036         }                                                               \
12037         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
12038              (u8 *)bp->arr, len);                                       \
12039 } while (0)
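/* Editor's note: a rough sketch of one expansion of the macro above -
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * becomes approximately:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */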
12040
12041 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12042 {
12043         const char *fw_file_name;
12044         struct bnx2x_fw_file_hdr *fw_hdr;
12045         int rc;
12046
12047         if (CHIP_IS_E1(bp))
12048                 fw_file_name = FW_FILE_NAME_E1;
12049         else
12050                 fw_file_name = FW_FILE_NAME_E1H;
12051
12052         pr_info("Loading %s\n", fw_file_name);
12053
12054         rc = request_firmware(&bp->firmware, fw_file_name, dev);
12055         if (rc) {
12056                 pr_err("Can't load firmware file %s\n", fw_file_name);
12057                 goto request_firmware_exit;
12058         }
12059
12060         rc = bnx2x_check_firmware(bp);
12061         if (rc) {
12062                 pr_err("Corrupt firmware file %s\n", fw_file_name);
12063                 goto request_firmware_exit;
12064         }
12065
12066         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12067
12068         /* Initialize the pointers to the init arrays */
12069         /* Blob */
12070         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12071
12072         /* Opcodes */
12073         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12074
12075         /* Offsets */
12076         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12077                             be16_to_cpu_n);
12078
12079         /* STORMs firmware */
12080         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12081                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12082         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12083                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12084         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12085                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12086         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12087                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12088         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12089                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12090         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12091                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12092         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12093                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12094         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12095                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12096
12097         return 0;
12098
12099 init_offsets_alloc_err:
12100         kfree(bp->init_ops);
12101 init_ops_alloc_err:
12102         kfree(bp->init_data);
12103 request_firmware_exit:
12104         release_firmware(bp->firmware);
12105
12106         return rc;
12107 }
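
/*
 * Note the unwind order above: each error label releases only what was
 * set up before the failing step, so a failed offsets-table allocation
 * frees init_ops, then init_data, and finally drops the firmware image.
 */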
12108
12109
12110 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12111                                     const struct pci_device_id *ent)
12112 {
12113         struct net_device *dev = NULL;
12114         struct bnx2x *bp;
12115         int pcie_width, pcie_speed;
12116         int rc;
12117
12118         /* dev zeroed in init_etherdev */
12119         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12120         if (!dev) {
12121                 pr_err("Cannot allocate net device\n");
12122                 return -ENOMEM;
12123         }
12124
12125         bp = netdev_priv(dev);
12126         bp->msg_enable = debug;
12127
12128         pci_set_drvdata(pdev, dev);
12129
12130         rc = bnx2x_init_dev(pdev, dev);
12131         if (rc < 0) {
12132                 free_netdev(dev);
12133                 return rc;
12134         }
12135
12136         rc = bnx2x_init_bp(bp);
12137         if (rc)
12138                 goto init_one_exit;
12139
12140         /* Set init arrays */
12141         rc = bnx2x_init_firmware(bp, &pdev->dev);
12142         if (rc) {
12143                 pr_err("Error loading firmware\n");
12144                 goto init_one_exit;
12145         }
12146
12147         rc = register_netdev(dev);
12148         if (rc) {
12149                 dev_err(&pdev->dev, "Cannot register net device\n");
12150                 goto init_one_exit;
12151         }
12152
12153         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12154         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12155                     board_info[ent->driver_data].name,
12156                     (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12157                     pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12158                     dev->base_addr, bp->pdev->irq, dev->dev_addr);
12159
12160         return 0;
12161
12162 init_one_exit:
12163         if (bp->regview)
12164                 iounmap(bp->regview);
12165
12166         if (bp->doorbells)
12167                 iounmap(bp->doorbells);
12168
12169         free_netdev(dev);
12170
12171         if (atomic_read(&pdev->enable_cnt) == 1)
12172                 pci_release_regions(pdev);
12173
12174         pci_disable_device(pdev);
12175         pci_set_drvdata(pdev, NULL);
12176
12177         return rc;
12178 }
12179
12180 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12181 {
12182         struct net_device *dev = pci_get_drvdata(pdev);
12183         struct bnx2x *bp;
12184
12185         if (!dev) {
12186                 pr_err("BAD net device from bnx2x_init_one\n");
12187                 return;
12188         }
12189         bp = netdev_priv(dev);
12190
12191         unregister_netdev(dev);
12192
12193         kfree(bp->init_ops_offsets);
12194         kfree(bp->init_ops);
12195         kfree(bp->init_data);
12196         release_firmware(bp->firmware);
12197
12198         if (bp->regview)
12199                 iounmap(bp->regview);
12200
12201         if (bp->doorbells)
12202                 iounmap(bp->doorbells);
12203
12204         free_netdev(dev);
12205
12206         if (atomic_read(&pdev->enable_cnt) == 1)
12207                 pci_release_regions(pdev);
12208
12209         pci_disable_device(pdev);
12210         pci_set_drvdata(pdev, NULL);
12211 }
12212
12213 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12214 {
12215         struct net_device *dev = pci_get_drvdata(pdev);
12216         struct bnx2x *bp;
12217
12218         if (!dev) {
12219                 pr_err("BAD net device from bnx2x_init_one\n");
12220                 return -ENODEV;
12221         }
12222         bp = netdev_priv(dev);
12223
12224         rtnl_lock();
12225
12226         pci_save_state(pdev);
12227
12228         if (!netif_running(dev)) {
12229                 rtnl_unlock();
12230                 return 0;
12231         }
12232
12233         netif_device_detach(dev);
12234
12235         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12236
12237         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12238
12239         rtnl_unlock();
12240
12241         return 0;
12242 }
12243
12244 static int bnx2x_resume(struct pci_dev *pdev)
12245 {
12246         struct net_device *dev = pci_get_drvdata(pdev);
12247         struct bnx2x *bp;
12248         int rc;
12249
12250         if (!dev) {
12251                 pr_err("BAD net device from bnx2x_init_one\n");
12252                 return -ENODEV;
12253         }
12254         bp = netdev_priv(dev);
12255
12256         rtnl_lock();
12257
12258         pci_restore_state(pdev);
12259
12260         if (!netif_running(dev)) {
12261                 rtnl_unlock();
12262                 return 0;
12263         }
12264
12265         bnx2x_set_power_state(bp, PCI_D0);
12266         netif_device_attach(dev);
12267
12268         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12269
12270         rtnl_unlock();
12271
12272         return rc;
12273 }
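
/*
 * Both suspend and resume run under rtnl_lock so they cannot race with
 * a concurrent ndo_open/ndo_stop; an interface that is not running only
 * needs its PCI config space saved or restored.
 */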
12274
12275 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12276 {
12277         int i;
12278
12279         bp->state = BNX2X_STATE_ERROR;
12280
12281         bp->rx_mode = BNX2X_RX_MODE_NONE;
12282
12283         bnx2x_netif_stop(bp, 0);
12284
12285         del_timer_sync(&bp->timer);
12286         bp->stats_state = STATS_STATE_DISABLED;
12287         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12288
12289         /* Release IRQs */
12290         bnx2x_free_irq(bp, false);
12291
12292         if (CHIP_IS_E1(bp)) {
12293                 struct mac_configuration_cmd *config =
12294                                                 bnx2x_sp(bp, mcast_config);
12295
12296                 for (i = 0; i < config->hdr.length; i++)
12297                         CAM_INVALIDATE(config->config_table[i]);
12298         }
12299
12300         /* Free SKBs, SGEs, TPA pool and driver internals */
12301         bnx2x_free_skbs(bp);
12302         for_each_queue(bp, i)
12303                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12304         for_each_queue(bp, i)
12305                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12306         bnx2x_free_mem(bp);
12307
12308         bp->state = BNX2X_STATE_CLOSED;
12309
12310         netif_carrier_off(bp->dev);
12311
12312         return 0;
12313 }
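
/*
 * This is a trimmed-down unload for the PCI-error path: the adapter may
 * be unreachable, so the usual chip shutdown handshake is skipped and
 * only host-side state is released (IRQs, skbs, SGEs, NAPI contexts,
 * DMA memory), with the E1 CAM shadow entries marked invalid.
 */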
12314
12315 static void bnx2x_eeh_recover(struct bnx2x *bp)
12316 {
12317         u32 val;
12318
12319         mutex_init(&bp->port.phy_mutex);
12320
12321         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12322         bp->link_params.shmem_base = bp->common.shmem_base;
12323         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12324
12325         if (!bp->common.shmem_base ||
12326             (bp->common.shmem_base < 0xA0000) ||
12327             (bp->common.shmem_base >= 0xC0000)) {
12328                 BNX2X_DEV_INFO("MCP not active\n");
12329                 bp->flags |= NO_MCP_FLAG;
12330                 return;
12331         }
12332
12333         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12334         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12335                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12336                 BNX2X_ERR("BAD MCP validity signature\n");
12337
12338         if (!BP_NOMCP(bp)) {
12339                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12340                               & DRV_MSG_SEQ_NUMBER_MASK);
12341                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12342         }
12343 }
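
/*
 * After a slot reset the shared-memory base is re-read from the chip; a
 * value outside the expected window is taken to mean the MCP (management
 * firmware) is not running, in which case the driver sets NO_MCP_FLAG
 * instead of resyncing the firmware sequence number.
 */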
12344
12345 /**
12346  * bnx2x_io_error_detected - called when PCI error is detected
12347  * @pdev: Pointer to PCI device
12348  * @state: The current pci connection state
12349  *
12350  * This function is called after a PCI bus error affecting
12351  * this device has been detected.
12352  */
12353 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12354                                                 pci_channel_state_t state)
12355 {
12356         struct net_device *dev = pci_get_drvdata(pdev);
12357         struct bnx2x *bp = netdev_priv(dev);
12358
12359         rtnl_lock();
12360
12361         netif_device_detach(dev);
12362
12363         if (state == pci_channel_io_perm_failure) {
12364                 rtnl_unlock();
12365                 return PCI_ERS_RESULT_DISCONNECT;
12366         }
12367
12368         if (netif_running(dev))
12369                 bnx2x_eeh_nic_unload(bp);
12370
12371         pci_disable_device(pdev);
12372
12373         rtnl_unlock();
12374
12375         /* Request a slot reset */
12376         return PCI_ERS_RESULT_NEED_RESET;
12377 }
12378
12379 /**
12380  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12381  * @pdev: Pointer to PCI device
12382  *
12383  * Restart the card from scratch, as if from a cold boot.
12384  */
12385 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12386 {
12387         struct net_device *dev = pci_get_drvdata(pdev);
12388         struct bnx2x *bp = netdev_priv(dev);
12389
12390         rtnl_lock();
12391
12392         if (pci_enable_device(pdev)) {
12393                 dev_err(&pdev->dev,
12394                         "Cannot re-enable PCI device after reset\n");
12395                 rtnl_unlock();
12396                 return PCI_ERS_RESULT_DISCONNECT;
12397         }
12398
12399         pci_set_master(pdev);
12400         pci_restore_state(pdev);
12401
12402         if (netif_running(dev))
12403                 bnx2x_set_power_state(bp, PCI_D0);
12404
12405         rtnl_unlock();
12406
12407         return PCI_ERS_RESULT_RECOVERED;
12408 }
12409
12410 /**
12411  * bnx2x_io_resume - called when traffic can start flowing again
12412  * @pdev: Pointer to PCI device
12413  *
12414  * This callback is called when the error recovery driver tells us that
12415  * it's OK to resume normal operation.
12416  */
12417 static void bnx2x_io_resume(struct pci_dev *pdev)
12418 {
12419         struct net_device *dev = pci_get_drvdata(pdev);
12420         struct bnx2x *bp = netdev_priv(dev);
12421
12422         rtnl_lock();
12423
12424         bnx2x_eeh_recover(bp);
12425
12426         if (netif_running(dev))
12427                 bnx2x_nic_load(bp, LOAD_NORMAL);
12428
12429         netif_device_attach(dev);
12430
12431         rtnl_unlock();
12432 }
12433
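/*
 * PCI error recovery (EEH/AER) invokes these callbacks in order:
 * error_detected() quiesces the device, slot_reset() re-enables it
 * after the bus has been reset, and resume() reloads the NIC and
 * reattaches the net device.
 */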
12434 static struct pci_error_handlers bnx2x_err_handler = {
12435         .error_detected = bnx2x_io_error_detected,
12436         .slot_reset     = bnx2x_io_slot_reset,
12437         .resume         = bnx2x_io_resume,
12438 };
12439
12440 static struct pci_driver bnx2x_pci_driver = {
12441         .name        = DRV_MODULE_NAME,
12442         .id_table    = bnx2x_pci_tbl,
12443         .probe       = bnx2x_init_one,
12444         .remove      = __devexit_p(bnx2x_remove_one),
12445         .suspend     = bnx2x_suspend,
12446         .resume      = bnx2x_resume,
12447         .err_handler = &bnx2x_err_handler,
12448 };
12449
12450 static int __init bnx2x_init(void)
12451 {
12452         int ret;
12453
12454         pr_info("%s", version);
12455
12456         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12457         if (bnx2x_wq == NULL) {
12458                 pr_err("Cannot create workqueue\n");
12459                 return -ENOMEM;
12460         }
12461
12462         ret = pci_register_driver(&bnx2x_pci_driver);
12463         if (ret) {
12464                 pr_err("Cannot register driver\n");
12465                 destroy_workqueue(bnx2x_wq);
12466         }
12467         return ret;
12468 }
12469
12470 static void __exit bnx2x_cleanup(void)
12471 {
12472         pci_unregister_driver(&bnx2x_pci_driver);
12473
12474         destroy_workqueue(bnx2x_wq);
12475 }
12476
12477 module_init(bnx2x_init);
12478 module_exit(bnx2x_cleanup);
12479
12480 #ifdef BCM_CNIC
12481
12482 /* count denotes the number of new completions we have seen */
12483 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12484 {
12485         struct eth_spe *spe;
12486
12487 #ifdef BNX2X_STOP_ON_ERROR
12488         if (unlikely(bp->panic))
12489                 return;
12490 #endif
12491
12492         spin_lock_bh(&bp->spq_lock);
12493         bp->cnic_spq_pending -= count;
12494
12495         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12496              bp->cnic_spq_pending++) {
12497
12498                 if (!bp->cnic_kwq_pending)
12499                         break;
12500
12501                 spe = bnx2x_sp_get_next(bp);
12502                 *spe = *bp->cnic_kwq_cons;
12503
12504                 bp->cnic_kwq_pending--;
12505
12506                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12507                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12508
12509                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12510                         bp->cnic_kwq_cons = bp->cnic_kwq;
12511                 else
12512                         bp->cnic_kwq_cons++;
12513         }
12514         bnx2x_sp_prod_update(bp);
12515         spin_unlock_bh(&bp->spq_lock);
12516 }
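
/*
 * bp->cnic_kwq is a software staging ring between the CNIC module and
 * the slow-path queue: bnx2x_cnic_sp_queue() appends kwqes to it, and
 * the loop above drains them into real SPQ entries whenever completions
 * ("count") free up credit, bounded by max_kwqe_pending.
 */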
12517
12518 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12519                                struct kwqe_16 *kwqes[], u32 count)
12520 {
12521         struct bnx2x *bp = netdev_priv(dev);
12522         int i;
12523
12524 #ifdef BNX2X_STOP_ON_ERROR
12525         if (unlikely(bp->panic))
12526                 return -EIO;
12527 #endif
12528
12529         spin_lock_bh(&bp->spq_lock);
12530
12531         for (i = 0; i < count; i++) {
12532                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12533
12534                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12535                         break;
12536
12537                 *bp->cnic_kwq_prod = *spe;
12538
12539                 bp->cnic_kwq_pending++;
12540
12541                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12542                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
12543                    spe->data.mac_config_addr.hi,
12544                    spe->data.mac_config_addr.lo,
12545                    bp->cnic_kwq_pending);
12546
12547                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12548                         bp->cnic_kwq_prod = bp->cnic_kwq;
12549                 else
12550                         bp->cnic_kwq_prod++;
12551         }
12552
12553         spin_unlock_bh(&bp->spq_lock);
12554
12555         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12556                 bnx2x_cnic_sp_post(bp, 0);
12557
12558         return i;
12559 }
12560
12561 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12562 {
12563         struct cnic_ops *c_ops;
12564         int rc = 0;
12565
12566         mutex_lock(&bp->cnic_mutex);
12567         c_ops = bp->cnic_ops;
12568         if (c_ops)
12569                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12570         mutex_unlock(&bp->cnic_mutex);
12571
12572         return rc;
12573 }
12574
12575 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12576 {
12577         struct cnic_ops *c_ops;
12578         int rc = 0;
12579
12580         rcu_read_lock();
12581         c_ops = rcu_dereference(bp->cnic_ops);
12582         if (c_ops)
12583                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12584         rcu_read_unlock();
12585
12586         return rc;
12587 }
12588
12589 /*
12590  * for commands that have no data
12591  */
12592 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12593 {
12594         struct cnic_ctl_info ctl = {0};
12595
12596         ctl.cmd = cmd;
12597
12598         return bnx2x_cnic_ctl_send(bp, &ctl);
12599 }
12600
12601 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12602 {
12603         struct cnic_ctl_info ctl;
12604
12605         /* first we tell CNIC and only then we count this as a completion */
12606         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12607         ctl.data.comp.cid = cid;
12608
12609         bnx2x_cnic_ctl_send_bh(bp, &ctl);
12610         bnx2x_cnic_sp_post(bp, 1);
12611 }
12612
12613 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12614 {
12615         struct bnx2x *bp = netdev_priv(dev);
12616         int rc = 0;
12617
12618         switch (ctl->cmd) {
12619         case DRV_CTL_CTXTBL_WR_CMD: {
12620                 u32 index = ctl->data.io.offset;
12621                 dma_addr_t addr = ctl->data.io.dma_addr;
12622
12623                 bnx2x_ilt_wr(bp, index, addr);
12624                 break;
12625         }
12626
12627         case DRV_CTL_COMPLETION_CMD: {
12628                 int count = ctl->data.comp.comp_count;
12629
12630                 bnx2x_cnic_sp_post(bp, count);
12631                 break;
12632         }
12633
12634         /* rtnl_lock is held.  */
12635         case DRV_CTL_START_L2_CMD: {
12636                 u32 cli = ctl->data.ring.client_id;
12637
12638                 bp->rx_mode_cl_mask |= (1 << cli);
12639                 bnx2x_set_storm_rx_mode(bp);
12640                 break;
12641         }
12642
12643         /* rtnl_lock is held.  */
12644         case DRV_CTL_STOP_L2_CMD: {
12645                 u32 cli = ctl->data.ring.client_id;
12646
12647                 bp->rx_mode_cl_mask &= ~(1 << cli);
12648                 bnx2x_set_storm_rx_mode(bp);
12649                 break;
12650         }
12651
12652         default:
12653                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12654                 rc = -EINVAL;
12655         }
12656
12657         return rc;
12658 }
12659
12660 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12661 {
12662         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12663
12664         if (bp->flags & USING_MSIX_FLAG) {
12665                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12666                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12667                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12668         } else {
12669                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12670                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12671         }
12672         cp->irq_arr[0].status_blk = bp->cnic_sb;
12673         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12674         cp->irq_arr[1].status_blk = bp->def_status_blk;
12675         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12676
12677         cp->num_irq = 2;
12678 }
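
/*
 * With MSI-X, CNIC is handed vector 1 of the bnx2x MSI-X table together
 * with its own status block; irq_arr[1] always describes the default
 * status block, which is why num_irq is reported as 2.
 */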
12679
12680 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12681                                void *data)
12682 {
12683         struct bnx2x *bp = netdev_priv(dev);
12684         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12685
12686         if (ops == NULL)
12687                 return -EINVAL;
12688
12689         if (atomic_read(&bp->intr_sem) != 0)
12690                 return -EBUSY;
12691
12692         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12693         if (!bp->cnic_kwq)
12694                 return -ENOMEM;
12695
12696         bp->cnic_kwq_cons = bp->cnic_kwq;
12697         bp->cnic_kwq_prod = bp->cnic_kwq;
12698         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12699
12700         bp->cnic_spq_pending = 0;
12701         bp->cnic_kwq_pending = 0;
12702
12703         bp->cnic_data = data;
12704
12705         cp->num_irq = 0;
12706         cp->drv_state = CNIC_DRV_STATE_REGD;
12707
12708         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12709
12710         bnx2x_setup_cnic_irq_info(bp);
12711         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12712         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12713         rcu_assign_pointer(bp->cnic_ops, ops);
12714
12715         return 0;
12716 }
12717
12718 static int bnx2x_unregister_cnic(struct net_device *dev)
12719 {
12720         struct bnx2x *bp = netdev_priv(dev);
12721         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12722
12723         mutex_lock(&bp->cnic_mutex);
12724         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12725                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12726                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12727         }
12728         cp->drv_state = 0;
12729         rcu_assign_pointer(bp->cnic_ops, NULL);
12730         mutex_unlock(&bp->cnic_mutex);
12731         synchronize_rcu();
12732         kfree(bp->cnic_kwq);
12733         bp->cnic_kwq = NULL;
12734
12735         return 0;
12736 }
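
/*
 * Clearing bp->cnic_ops and then calling synchronize_rcu() guarantees
 * that any reader still inside the RCU read-side section of
 * bnx2x_cnic_ctl_send_bh() has finished before the kwq ring is freed.
 */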
12737
12738 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12739 {
12740         struct bnx2x *bp = netdev_priv(dev);
12741         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12742
12743         cp->drv_owner = THIS_MODULE;
12744         cp->chip_id = CHIP_ID(bp);
12745         cp->pdev = bp->pdev;
12746         cp->io_base = bp->regview;
12747         cp->io_base2 = bp->doorbells;
12748         cp->max_kwqe_pending = 8;
12749         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12750         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12751         cp->ctx_tbl_len = CNIC_ILT_LINES;
12752         cp->starting_cid = BCM_CNIC_CID_START;
12753         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12754         cp->drv_ctl = bnx2x_drv_ctl;
12755         cp->drv_register_cnic = bnx2x_register_cnic;
12756         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12757
12758         return cp;
12759 }
12760 EXPORT_SYMBOL(bnx2x_cnic_probe);
12761
12762 #endif /* BCM_CNIC */
12763