/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-7"
#define DRV_MODULE_RELDATE      "2010/02/28"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
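
/* The two helpers above implement indirect GRC access through PCI
 * config space, usable before BAR-based access is set up: write the
 * target GRC address into the PCICFG_GRC_ADDRESS window register, move
 * the data through PCICFG_GRC_DATA, then park the window back on
 * PCICFG_VENDOR_ID_OFFSET so stray config cycles hit a harmless
 * offset.  A read-modify-write built on them would look roughly like
 * this (some_reg/some_bit are placeholders, not driver symbols):
 *
 *      u32 val = bnx2x_reg_rd_ind(bp, some_reg);
 *      bnx2x_reg_wr_ind(bp, some_reg, val | some_bit);
 */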

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
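
/* The DMAE engine exposes 16 command slots; dmae_reg_go_c[] above maps
 * a slot index to its GO register.  bnx2x_post_dmae() copies the
 * command dword-by-dword into the slot's area of DMAE_REG_CMD_MEM and
 * then writes 1 to the GO register to kick the engine.  Sketch of the
 * addressing, as implied by the code above:
 *
 *      dword w of slot i lives at
 *      DMAE_REG_CMD_MEM + i * sizeof(struct dmae_command) + w * 4
 */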

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
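
/* Completion scheme: the DMAE engine writes DMAE_COMP_VAL to the
 * slowpath "write-back" word (wb_comp) when the transfer finishes.
 * bnx2x_write_dmae() zeroes that word under dmae_mutex, posts the
 * command, then polls it.  With cnt = 200 and 5 us per iteration the
 * wait is bounded at roughly a millisecond on real silicon (100 ms
 * steps on emulation/FPGA); that bound is an estimate from the
 * constants above, not a documented hardware limit.
 */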

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
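
/* bnx2x_write_dmae_phys_len() splits a long write into chunks of
 * DMAE_LEN32_WR_MAX dwords.  Worked example, assuming
 * DMAE_LEN32_WR_MAX is 0x400 (1024 dwords): len = 2500 goes out as
 * 1024 + 1024 + 452, with the byte offset advancing by chunk * 4 while
 * len itself counts dwords.
 */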

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
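
/* Each storm processor (X/T/C/U) keeps an assert list in its internal
 * memory: a last-index byte plus an array of 4-dword entries.
 * bnx2x_mc_assert() walks each list until row0 reads
 * COMMON_ASM_INVALID_ASSERT_OPCODE, prints the rows high-to-low
 * (row3..row0) and returns the total number of asserts found across
 * all four storms; a non-zero return means firmware trouble.
 */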

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
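
/* The IGU ack is a single 32-bit write: status_block_index carries the
 * new index and sb_id_and_flags packs the SB id, storm id, update flag
 * and interrupt mode using the shifts above.  A purely illustrative
 * call (values made up, not traced from a real run):
 *
 *      bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *                   le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 *
 * would ack the USTORM index of that SB and re-enable the interrupt
 * line in one MMIO write.
 */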

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
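
/* BD accounting in bnx2x_free_tx_pkt(): the start BD's nbd field
 * counts the BDs the packet occupies.  An illustrative layout for a
 * TSO packet with three data frags (not traced from real traffic):
 *
 *      start BD -> parse BD -> split header BD -> 3 data BDs
 *
 * Only the start BD and the data BDs carry DMA mappings, which is why
 * the parse BD and the split header BD are stepped over without an
 * unmap; new_cons, derived from first_bd + nbd, advances the consumer
 * past everything the packet used.
 */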

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
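
/* Worked example for the arithmetic above, assuming a tx_ring_size of
 * 4096 with the NUM_TX_RINGS next-page BDs counted as permanently
 * "used": with prod = 100 and cons = 40 (no wrap), used becomes
 * 60 + NUM_TX_RINGS and the function returns 4096 - used.  Treating
 * the next-page entries as used keeps the estimate conservative, so
 * the queue stops slightly early instead of overrunning the ring.
 */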

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the queue
                 * while it's empty. This could happen if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* Note that we are not allocating a new skb, we are just moving one
 * from cons to prod.  We are not creating a new mapping, so there is
 * no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
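
/* Recycling is a pure descriptor move: the skb pointer and its DMA
 * mapping hop from the cons slot to the prod slot and the BD is copied
 * wholesale.  Only the first RX_COPY_THRESH bytes are synced back for
 * the device, which reads as "the part the CPU may have touched while
 * inspecting the headers"; that interpretation is inferred from the
 * constant's use here, not from a datasheet.
 */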

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
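
/* The sge_mask is a bitmap of outstanding SGE pages, one u64 per
 * RX_SGE_MASK_ELEM_SZ entries.  The producer only advances in whole
 * elements: an all-zero element means every page it covers was
 * consumed, so it is refilled to RX_SGE_MASK_ELEM_ONE_MASK and
 * rx_sge_prod jumps by RX_SGE_MASK_ELEM_SZ.  Illustrative trace,
 * assuming 64 entries per element: if elements 3 and 4 are fully clear
 * and element 5 still has bits set, the loop above refills 3 and 4 and
 * bumps rx_sge_prod by 128.
 */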

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page: these correspond to
           the "next" element, hence they will never be indicated and
           should be removed from the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
1379
1380 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1381                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1382                            u16 cqe_idx)
1383 {
1384         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1385         struct sk_buff *skb = rx_buf->skb;
1386         /* alloc new skb */
1387         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1388
1389         /* Unmap skb in the pool anyway, as we are going to change
1390            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1391            fails. */
1392         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1393                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1394
1395         if (likely(new_skb)) {
1396                 /* fix ip xsum and give it to the stack */
1397                 /* (no need to map the new skb) */
1398 #ifdef BCM_VLAN
1399                 int is_vlan_cqe =
1400                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1401                          PARSING_FLAGS_VLAN);
1402                 int is_not_hwaccel_vlan_cqe =
1403                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1404 #endif
1405
1406                 prefetch(skb);
1407                 prefetch(((char *)(skb)) + 128);
1408
1409 #ifdef BNX2X_STOP_ON_ERROR
1410                 if (pad + len > bp->rx_buf_size) {
1411                         BNX2X_ERR("skb_put is about to fail...  "
1412                                   "pad %d  len %d  rx_buf_size %d\n",
1413                                   pad, len, bp->rx_buf_size);
1414                         bnx2x_panic();
1415                         return;
1416                 }
1417 #endif
1418
1419                 skb_reserve(skb, pad);
1420                 skb_put(skb, len);
1421
1422                 skb->protocol = eth_type_trans(skb, bp->dev);
1423                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1424
1425                 {
1426                         struct iphdr *iph;
1427
1428                         iph = (struct iphdr *)skb->data;
1429 #ifdef BCM_VLAN
1430                         /* If there is no Rx VLAN offloading -
1431                            take VLAN tag into an account */
1432                         if (unlikely(is_not_hwaccel_vlan_cqe))
1433                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1434 #endif
1435                         iph->check = 0;
1436                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1437                 }
1438
1439                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1440                                          &cqe->fast_path_cqe, cqe_idx)) {
1441 #ifdef BCM_VLAN
1442                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1443                             (!is_not_hwaccel_vlan_cqe))
1444                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1445                                                 le16_to_cpu(cqe->fast_path_cqe.
1446                                                             vlan_tag));
1447                         else
1448 #endif
1449                                 netif_receive_skb(skb);
1450                 } else {
1451                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1452                            " - dropping packet!\n");
1453                         dev_kfree_skb(skb);
1454                 }
1455
1456
1457                 /* put new skb in bin */
1458                 fp->tpa_pool[queue].skb = new_skb;
1459
1460         } else {
1461                 /* else drop the packet and keep the buffer in the bin */
1462                 DP(NETIF_MSG_RX_STATUS,
1463                    "Failed to allocate new skb - dropping packet!\n");
1464                 fp->eth_q_stats.rx_skb_alloc_failed++;
1465         }
1466
1467         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1468 }
1469
1470 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1471                                         struct bnx2x_fastpath *fp,
1472                                         u16 bd_prod, u16 rx_comp_prod,
1473                                         u16 rx_sge_prod)
1474 {
1475         struct ustorm_eth_rx_producers rx_prods = {0};
1476         int i;
1477
1478         /* Update producers */
1479         rx_prods.bd_prod = bd_prod;
1480         rx_prods.cqe_prod = rx_comp_prod;
1481         rx_prods.sge_prod = rx_sge_prod;
1482
1483         /*
1484          * Make sure that the BD and SGE data is updated before updating the
1485          * producers since FW might read the BD/SGE right after the producer
1486          * is updated.
1487          * This is only applicable for weak-ordered memory model archs such
1488          * as IA-64. The following barrier is also mandatory since the FW
1489          * assumes BDs must have buffers.
1490          */
1491         wmb();
1492
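        /* Copy the producers struct into USTORM internal memory one
         * 32-bit word at a time (REG_WR writes a single dword) */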
1493         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1494                 REG_WR(bp, BAR_USTRORM_INTMEM +
1495                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1496                        ((u32 *)&rx_prods)[i]);
1497
1498         mmiowb(); /* keep prod updates ordered */
1499
1500         DP(NETIF_MSG_RX_STATUS,
1501            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1502            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1503 }
1504
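/* NAPI poll helper: process up to @budget completed RX packets from the
 * fastpath completion ring and return the number of packets handled.
 */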
1505 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1506 {
1507         struct bnx2x *bp = fp->bp;
1508         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1509         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1510         int rx_pkt = 0;
1511
1512 #ifdef BNX2X_STOP_ON_ERROR
1513         if (unlikely(bp->panic))
1514                 return 0;
1515 #endif
1516
1517         /* The CQ "next element" is the same size as a regular element,
1518            which is why the plain index arithmetic here is OK */
1519         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
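        /* If the HW index lands on the last slot of a page (which holds
         * the "next page" element), step over it so it stays comparable
         * with the SW index, which never points at such slots */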
1520         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1521                 hw_comp_cons++;
1522
1523         bd_cons = fp->rx_bd_cons;
1524         bd_prod = fp->rx_bd_prod;
1525         bd_prod_fw = bd_prod;
1526         sw_comp_cons = fp->rx_comp_cons;
1527         sw_comp_prod = fp->rx_comp_prod;
1528
1529         /* Memory barrier necessary as speculative reads of the rx
1530          * buffer can be ahead of the index in the status block
1531          */
1532         rmb();
1533
1534         DP(NETIF_MSG_RX_STATUS,
1535            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1536            fp->index, hw_comp_cons, sw_comp_cons);
1537
1538         while (sw_comp_cons != hw_comp_cons) {
1539                 struct sw_rx_bd *rx_buf = NULL;
1540                 struct sk_buff *skb;
1541                 union eth_rx_cqe *cqe;
1542                 u8 cqe_fp_flags;
1543                 u16 len, pad;
1544
1545                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1546                 bd_prod = RX_BD(bd_prod);
1547                 bd_cons = RX_BD(bd_cons);
1548
1549                 /* Prefetch the page containing the BD descriptor
1550                    at the producer's index. It will be needed when a new
1551                    skb is allocated */
1552                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1553                                              (&fp->rx_desc_ring[bd_prod])) -
1554                                   PAGE_SIZE + 1));
1555
1556                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1557                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1558
1559                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1560                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1561                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1562                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1563                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1564                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1565
1566                 /* is this a slowpath msg? */
1567                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1568                         bnx2x_sp_event(fp, cqe);
1569                         goto next_cqe;
1570
1571                 /* this is an rx packet */
1572                 } else {
1573                         rx_buf = &fp->rx_buf_ring[bd_cons];
1574                         skb = rx_buf->skb;
1575                         prefetch(skb);
1576                         prefetch((u8 *)skb + 256);
1577                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1578                         pad = cqe->fast_path_cqe.placement_offset;
1579
1580                         /* If CQE is marked both TPA_START and TPA_END
1581                            it is a non-TPA CQE */
1582                         if ((!fp->disable_tpa) &&
1583                             (TPA_TYPE(cqe_fp_flags) !=
1584                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1585                                 u16 queue = cqe->fast_path_cqe.queue_index;
1586
1587                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1588                                         DP(NETIF_MSG_RX_STATUS,
1589                                            "calling tpa_start on queue %d\n",
1590                                            queue);
1591
1592                                         bnx2x_tpa_start(fp, queue, skb,
1593                                                         bd_cons, bd_prod);
1594                                         goto next_rx;
1595                                 }
1596
1597                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1598                                         DP(NETIF_MSG_RX_STATUS,
1599                                            "calling tpa_stop on queue %d\n",
1600                                            queue);
1601
1602                                         if (!BNX2X_RX_SUM_FIX(cqe))
1603                                                 BNX2X_ERR("STOP on non-TCP "
1604                                                           "data\n");
1605
1606                                         /* This is the size of the linear
1607                                            data on this skb */
1608                                         len = le16_to_cpu(cqe->fast_path_cqe.
1609                                                                 len_on_bd);
1610                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1611                                                     len, cqe, comp_ring_cons);
1612 #ifdef BNX2X_STOP_ON_ERROR
1613                                         if (bp->panic)
1614                                                 return 0;
1615 #endif
1616
1617                                         bnx2x_update_sge_prod(fp,
1618                                                         &cqe->fast_path_cqe);
1619                                         goto next_cqe;
1620                                 }
1621                         }
1622
1623                         pci_dma_sync_single_for_device(bp->pdev,
1624                                         pci_unmap_addr(rx_buf, mapping),
1625                                                        pad + RX_COPY_THRESH,
1626                                                        PCI_DMA_FROMDEVICE);
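                        /* only pad + RX_COPY_THRESH bytes are synced: that
                         * is all the CPU touches before the buffer is either
                         * copied or fully unmapped below */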
1627                         prefetch(skb);
1628                         prefetch(((char *)(skb)) + 128);
1629
1630                         /* is this an error packet? */
1631                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1632                                 DP(NETIF_MSG_RX_ERR,
1633                                    "ERROR  flags %x  rx packet %u\n",
1634                                    cqe_fp_flags, sw_comp_cons);
1635                                 fp->eth_q_stats.rx_err_discard_pkt++;
1636                                 goto reuse_rx;
1637                         }
1638
1639                         /* Since we don't have a jumbo ring
1640                          * copy small packets if mtu > 1500
1641                          */
1642                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1643                             (len <= RX_COPY_THRESH)) {
1644                                 struct sk_buff *new_skb;
1645
1646                                 new_skb = netdev_alloc_skb(bp->dev,
1647                                                            len + pad);
1648                                 if (new_skb == NULL) {
1649                                         DP(NETIF_MSG_RX_ERR,
1650                                            "ERROR  packet dropped "
1651                                            "because of alloc failure\n");
1652                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1653                                         goto reuse_rx;
1654                                 }
1655
1656                                 /* aligned copy */
1657                                 skb_copy_from_linear_data_offset(skb, pad,
1658                                                     new_skb->data + pad, len);
1659                                 skb_reserve(new_skb, pad);
1660                                 skb_put(new_skb, len);
1661
1662                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1663
1664                                 skb = new_skb;
1665
1666                         } else
1667                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1668                                 pci_unmap_single(bp->pdev,
1669                                         pci_unmap_addr(rx_buf, mapping),
1670                                                  bp->rx_buf_size,
1671                                                  PCI_DMA_FROMDEVICE);
1672                                 skb_reserve(skb, pad);
1673                                 skb_put(skb, len);
1674
1675                         } else {
1676                                 DP(NETIF_MSG_RX_ERR,
1677                                    "ERROR  packet dropped because "
1678                                    "of alloc failure\n");
1679                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1680 reuse_rx:
1681                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1682                                 goto next_rx;
1683                         }
1684
1685                         skb->protocol = eth_type_trans(skb, bp->dev);
1686
1687                         skb->ip_summed = CHECKSUM_NONE;
1688                         if (bp->rx_csum) {
1689                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1690                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                                 else
1692                                         fp->eth_q_stats.hw_csum_err++;
1693                         }
1694                 }
1695
1696                 skb_record_rx_queue(skb, fp->index);
1697
1698 #ifdef BCM_VLAN
1699                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1700                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1701                      PARSING_FLAGS_VLAN))
1702                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1703                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1704                 else
1705 #endif
1706                         netif_receive_skb(skb);
1707
1708
1709 next_rx:
1710                 rx_buf->skb = NULL;
1711
1712                 bd_cons = NEXT_RX_IDX(bd_cons);
1713                 bd_prod = NEXT_RX_IDX(bd_prod);
1714                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1715                 rx_pkt++;
1716 next_cqe:
1717                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1718                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1719
1720                 if (rx_pkt == budget)
1721                         break;
1722         } /* while */
1723
1724         fp->rx_bd_cons = bd_cons;
1725         fp->rx_bd_prod = bd_prod_fw;
1726         fp->rx_comp_cons = sw_comp_cons;
1727         fp->rx_comp_prod = sw_comp_prod;
1728
1729         /* Update producers */
1730         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1731                              fp->rx_sge_prod);
1732
1733         fp->rx_pkt += rx_pkt;
1734         fp->rx_calls++;
1735
1736         return rx_pkt;
1737 }
1738
1739 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1740 {
1741         struct bnx2x_fastpath *fp = fp_cookie;
1742         struct bnx2x *bp = fp->bp;
1743
1744         /* Return here if interrupt is disabled */
1745         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1746                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1747                 return IRQ_HANDLED;
1748         }
1749
1750         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1751            fp->index, fp->sb_id);
1752         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1753
1754 #ifdef BNX2X_STOP_ON_ERROR
1755         if (unlikely(bp->panic))
1756                 return IRQ_HANDLED;
1757 #endif
1758
1759         /* Handle Rx and Tx according to MSI-X vector */
1760         prefetch(fp->rx_cons_sb);
1761         prefetch(fp->tx_cons_sb);
1762         prefetch(&fp->status_blk->u_status_block.status_block_index);
1763         prefetch(&fp->status_blk->c_status_block.status_block_index);
1764         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1765
1766         return IRQ_HANDLED;
1767 }
1768
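/* Legacy INTx/MSI handler: a single vector serves all fastpath queues and
 * the slowpath; the IGU status word read below tells which of them need
 * servicing.
 */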
1769 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1770 {
1771         struct bnx2x *bp = netdev_priv(dev_instance);
1772         u16 status = bnx2x_ack_int(bp);
1773         u16 mask;
1774         int i;
1775
1776         /* Return here if interrupt is shared and it's not for us */
1777         if (unlikely(status == 0)) {
1778                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1779                 return IRQ_NONE;
1780         }
1781         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1782
1783         /* Return here if interrupt is disabled */
1784         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1785                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1786                 return IRQ_HANDLED;
1787         }
1788
1789 #ifdef BNX2X_STOP_ON_ERROR
1790         if (unlikely(bp->panic))
1791                 return IRQ_HANDLED;
1792 #endif
1793
1794         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1795                 struct bnx2x_fastpath *fp = &bp->fp[i];
1796
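                /* each status block owns one bit of the status word,
                 * starting from bit 1; bit 0 belongs to the slowpath */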
1797                 mask = 0x2 << fp->sb_id;
1798                 if (status & mask) {
1799                         /* Handle Rx and Tx according to SB id */
1800                         prefetch(fp->rx_cons_sb);
1801                         prefetch(&fp->status_blk->u_status_block.
1802                                                 status_block_index);
1803                         prefetch(fp->tx_cons_sb);
1804                         prefetch(&fp->status_blk->c_status_block.
1805                                                 status_block_index);
1806                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1807                         status &= ~mask;
1808                 }
1809         }
1810
1811 #ifdef BCM_CNIC
1812         mask = 0x2 << CNIC_SB_ID(bp);
1813         if (status & (mask | 0x1)) {
1814                 struct cnic_ops *c_ops = NULL;
1815
1816                 rcu_read_lock();
1817                 c_ops = rcu_dereference(bp->cnic_ops);
1818                 if (c_ops)
1819                         c_ops->cnic_handler(bp->cnic_data, NULL);
1820                 rcu_read_unlock();
1821
1822                 status &= ~mask;
1823         }
1824 #endif
1825
1826         if (unlikely(status & 0x1)) {
1827                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1828
1829                 status &= ~0x1;
1830                 if (!status)
1831                         return IRQ_HANDLED;
1832         }
1833
1834         if (status)
1835                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1836                    status);
1837
1838         return IRQ_HANDLED;
1839 }
1840
1841 /* end of fast path */
1842
1843 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1844
1845 /* Link */
1846
1847 /*
1848  * General service functions
1849  */
1850
1851 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1852 {
1853         u32 lock_status;
1854         u32 resource_bit = (1 << resource);
1855         int func = BP_FUNC(bp);
1856         u32 hw_lock_control_reg;
1857         int cnt;
1858
1859         /* Validating that the resource is within range */
1860         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1861                 DP(NETIF_MSG_HW,
1862                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1863                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1864                 return -EINVAL;
1865         }
1866
1867         if (func <= 5) {
1868                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1869         } else {
1870                 hw_lock_control_reg =
1871                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1872         }
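        /* The lock register pair: reading the base register returns the
         * currently held resource bits, writing a bit to base + 4 attempts
         * to take that resource, and writing it to the base register
         * releases it (see bnx2x_release_hw_lock() below) */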
1873
1874         /* Validating that the resource is not already taken */
1875         lock_status = REG_RD(bp, hw_lock_control_reg);
1876         if (lock_status & resource_bit) {
1877                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1878                    lock_status, resource_bit);
1879                 return -EEXIST;
1880         }
1881
1882         /* Try for 5 seconds, polling every 5 ms */
1883         for (cnt = 0; cnt < 1000; cnt++) {
1884                 /* Try to acquire the lock */
1885                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1886                 lock_status = REG_RD(bp, hw_lock_control_reg);
1887                 if (lock_status & resource_bit)
1888                         return 0;
1889
1890                 msleep(5);
1891         }
1892         DP(NETIF_MSG_HW, "Timeout\n");
1893         return -EAGAIN;
1894 }
1895
1896 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1897 {
1898         u32 lock_status;
1899         u32 resource_bit = (1 << resource);
1900         int func = BP_FUNC(bp);
1901         u32 hw_lock_control_reg;
1902
1903         /* Validating that the resource is within range */
1904         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1905                 DP(NETIF_MSG_HW,
1906                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1907                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1908                 return -EINVAL;
1909         }
1910
1911         if (func <= 5) {
1912                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1913         } else {
1914                 hw_lock_control_reg =
1915                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1916         }
1917
1918         /* Validating that the resource is currently taken */
1919         lock_status = REG_RD(bp, hw_lock_control_reg);
1920         if (!(lock_status & resource_bit)) {
1921                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1922                    lock_status, resource_bit);
1923                 return -EFAULT;
1924         }
1925
1926         REG_WR(bp, hw_lock_control_reg, resource_bit);
1927         return 0;
1928 }
1929
1930 /* HW Lock for shared dual port PHYs */
1931 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1932 {
1933         mutex_lock(&bp->port.phy_mutex);
1934
1935         if (bp->port.need_hw_lock)
1936                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1937 }
1938
1939 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1940 {
1941         if (bp->port.need_hw_lock)
1942                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1943
1944         mutex_unlock(&bp->port.phy_mutex);
1945 }
1946
1947 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1948 {
1949         /* The GPIO should be swapped if swap register is set and active */
1950         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1951                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1952         int gpio_shift = gpio_num +
1953                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954         u32 gpio_mask = (1 << gpio_shift);
1955         u32 gpio_reg;
1956         int value;
1957
1958         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1959                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1960                 return -EINVAL;
1961         }
1962
1963         /* read GPIO value */
1964         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1965
1966         /* get the requested pin value */
1967         if ((gpio_reg & gpio_mask) == gpio_mask)
1968                 value = 1;
1969         else
1970                 value = 0;
1971
1972         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1973
1974         return value;
1975 }
1976
1977 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1978 {
1979         /* The GPIO should be swapped if swap register is set and active */
1980         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1981                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1982         int gpio_shift = gpio_num +
1983                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1984         u32 gpio_mask = (1 << gpio_shift);
1985         u32 gpio_reg;
1986
1987         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1988                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1989                 return -EINVAL;
1990         }
1991
1992         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1993         /* read GPIO, keeping only the FLOAT bits */
1994         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1995
1996         switch (mode) {
1997         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1998                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1999                    gpio_num, gpio_shift);
2000                 /* clear FLOAT and set CLR */
2001                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2002                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2003                 break;
2004
2005         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2006                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2007                    gpio_num, gpio_shift);
2008                 /* clear FLOAT and set SET */
2009                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2010                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2011                 break;
2012
2013         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2014                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2015                    gpio_num, gpio_shift);
2016                 /* set FLOAT */
2017                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2018                 break;
2019
2020         default:
2021                 break;
2022         }
2023
2024         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2025         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2026
2027         return 0;
2028 }
2029
2030 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2031 {
2032         /* The GPIO should be swapped if swap register is set and active */
2033         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2034                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2035         int gpio_shift = gpio_num +
2036                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2037         u32 gpio_mask = (1 << gpio_shift);
2038         u32 gpio_reg;
2039
2040         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2041                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2046         /* read GPIO int */
2047         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2051                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2052                                    "output low\n", gpio_num, gpio_shift);
2053                 /* clear SET and set CLR */
2054                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2055                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2056                 break;
2057
2058         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2059                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2060                                    "output high\n", gpio_num, gpio_shift);
2061                 /* clear CLR and set SET */
2062                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2063                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2064                 break;
2065
2066         default:
2067                 break;
2068         }
2069
2070         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2071         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2072
2073         return 0;
2074 }
2075
2076 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2077 {
2078         u32 spio_mask = (1 << spio_num);
2079         u32 spio_reg;
2080
2081         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2082             (spio_num > MISC_REGISTERS_SPIO_7)) {
2083                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2084                 return -EINVAL;
2085         }
2086
2087         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2088         /* read SPIO, keeping only the FLOAT bits */
2089         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2090
2091         switch (mode) {
2092         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2093                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2094                 /* clear FLOAT and set CLR */
2095                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2096                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2097                 break;
2098
2099         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2100                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2101                 /* clear FLOAT and set SET */
2102                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2103                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2104                 break;
2105
2106         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2107                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2108                 /* set FLOAT */
2109                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2110                 break;
2111
2112         default:
2113                 break;
2114         }
2115
2116         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2117         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2118
2119         return 0;
2120 }
2121
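/* Translate the negotiated IEEE pause advertisement into the ethtool
 * ADVERTISED_Pause/ADVERTISED_Asym_Pause flags for this port.
 */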
2122 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2123 {
2124         switch (bp->link_vars.ieee_fc &
2125                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2126         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2127                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2128                                           ADVERTISED_Pause);
2129                 break;
2130
2131         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2132                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2133                                          ADVERTISED_Pause);
2134                 break;
2135
2136         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2137                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2138                 break;
2139
2140         default:
2141                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2142                                           ADVERTISED_Pause);
2143                 break;
2144         }
2145 }
2146
2147 static void bnx2x_link_report(struct bnx2x *bp)
2148 {
2149         if (bp->flags & MF_FUNC_DIS) {
2150                 netif_carrier_off(bp->dev);
2151                 netdev_err(bp->dev, "NIC Link is Down\n");
2152                 return;
2153         }
2154
2155         if (bp->link_vars.link_up) {
2156                 u16 line_speed;
2157
2158                 if (bp->state == BNX2X_STATE_OPEN)
2159                         netif_carrier_on(bp->dev);
2160                 netdev_info(bp->dev, "NIC Link is Up, ");
2161
2162                 line_speed = bp->link_vars.line_speed;
2163                 if (IS_E1HMF(bp)) {
2164                         u16 vn_max_rate;
2165
2166                         vn_max_rate =
2167                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2168                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2169                         if (vn_max_rate < line_speed)
2170                                 line_speed = vn_max_rate;
2171                 }
2172                 pr_cont("%d Mbps ", line_speed);
2173
2174                 if (bp->link_vars.duplex == DUPLEX_FULL)
2175                         pr_cont("full duplex");
2176                 else
2177                         pr_cont("half duplex");
2178
2179                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2180                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2181                                 pr_cont(", receive ");
2182                                 if (bp->link_vars.flow_ctrl &
2183                                     BNX2X_FLOW_CTRL_TX)
2184                                         pr_cont("& transmit ");
2185                         } else {
2186                                 pr_cont(", transmit ");
2187                         }
2188                         pr_cont("flow control ON");
2189                 }
2190                 pr_cont("\n");
2191
2192         } else { /* link_down */
2193                 netif_carrier_off(bp->dev);
2194                 netdev_err(bp->dev, "NIC Link is Down\n");
2195         }
2196 }
2197
2198 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2199 {
2200         if (!BP_NOMCP(bp)) {
2201                 u8 rc;
2202
2203                 /* Initialize link parameters structure variables */
2204                 /* It is recommended to turn off RX FC for jumbo frames
2205                    for better performance */
2206                 if (bp->dev->mtu > 5000)
2207                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2208                 else
2209                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2210
2211                 bnx2x_acquire_phy_lock(bp);
2212
2213                 if (load_mode == LOAD_DIAG)
2214                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2215
2216                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2217
2218                 bnx2x_release_phy_lock(bp);
2219
2220                 bnx2x_calc_fc_adv(bp);
2221
2222                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2223                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2224                         bnx2x_link_report(bp);
2225                 }
2226
2227                 return rc;
2228         }
2229         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2230         return -EINVAL;
2231 }
2232
2233 static void bnx2x_link_set(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2238                 bnx2x_release_phy_lock(bp);
2239
2240                 bnx2x_calc_fc_adv(bp);
2241         } else
2242                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2243 }
2244
2245 static void bnx2x__link_reset(struct bnx2x *bp)
2246 {
2247         if (!BP_NOMCP(bp)) {
2248                 bnx2x_acquire_phy_lock(bp);
2249                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2250                 bnx2x_release_phy_lock(bp);
2251         } else
2252                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2253 }
2254
2255 static u8 bnx2x_link_test(struct bnx2x *bp)
2256 {
2257         u8 rc;
2258
2259         bnx2x_acquire_phy_lock(bp);
2260         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2261         bnx2x_release_phy_lock(bp);
2262
2263         return rc;
2264 }
2265
2266 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2267 {
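        /* line_speed is in Mbps, i.e. bits per usec, so r_param is the
         * line rate in bytes per usec */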
2268         u32 r_param = bp->link_vars.line_speed / 8;
2269         u32 fair_periodic_timeout_usec;
2270         u32 t_fair;
2271
2272         memset(&(bp->cmng.rs_vars), 0,
2273                sizeof(struct rate_shaping_vars_per_port));
2274         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2275
2276         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2277         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2278
2279         /* this is the threshold below which no timer arming will occur.
2280            The 1.25 coefficient makes the threshold a little bigger than
2281            the real time, to compensate for timer inaccuracy */
2282         bp->cmng.rs_vars.rs_threshold =
2283                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2284
2285         /* resolution of fairness timer */
2286         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2287         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2288         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2289
2290         /* this is the threshold below which we won't arm the timer anymore */
2291         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2292
2293         /* r_param * t_fair is the byte credit of one fairness period;
2294            we don't want the accumulated credit to exceed FAIR_MEM such
2295            periods (the algorithm resolution) */
2296         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2297         /* since each tick is 4 usec */
2298         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2299 }
2300
2301 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2302    It's needed for further normalizing of the min_rates.
2303    The resulting sum is:
2304      sum of vn_min_rates.
2305        or
2306      0 - if all the min_rates are 0.
2307      In the latter case the fairness algorithm should be deactivated.
2308      If not all min_rates are zero, those that are zero are counted as
2309      DEF_MIN_RATE. */
2310 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2311 {
2312         int all_zero = 1;
2313         int port = BP_PORT(bp);
2314         int vn;
2315
2316         bp->vn_weight_sum = 0;
2317         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2318                 int func = 2*vn + port;
2319                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2320                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2321                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322
2323                 /* Skip hidden vns */
2324                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2325                         continue;
2326
2327                 /* If min rate is zero - set it to 1 */
2328                 if (!vn_min_rate)
2329                         vn_min_rate = DEF_MIN_RATE;
2330                 else
2331                         all_zero = 0;
2332
2333                 bp->vn_weight_sum += vn_min_rate;
2334         }
2335
2336         /* ... only if all min rates are zero - disable fairness */
2337         if (all_zero) {
2338                 bp->cmng.flags.cmng_enables &=
2339                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2340                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2341                    "  fairness will be disabled\n");
2342         } else
2343                 bp->cmng.flags.cmng_enables |=
2344                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2345 }
2346
2347 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2348 {
2349         struct rate_shaping_vars_per_vn m_rs_vn;
2350         struct fairness_vars_per_vn m_fair_vn;
2351         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2352         u16 vn_min_rate, vn_max_rate;
2353         int i;
2354
2355         /* If function is hidden - set min and max to zeroes */
2356         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2357                 vn_min_rate = 0;
2358                 vn_max_rate = 0;
2359
2360         } else {
2361                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2362                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2363                 /* If min rate is zero - set it to 1 */
2364                 if (!vn_min_rate)
2365                         vn_min_rate = DEF_MIN_RATE;
2366                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2367                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2368         }
2369         DP(NETIF_MSG_IFUP,
2370            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2371            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2372
2373         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2374         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2375
2376         /* global vn counter - maximal Mbps for this vn */
2377         m_rs_vn.vn_counter.rate = vn_max_rate;
2378
2379         /* quota - number of bytes transmitted in this period */
2380         m_rs_vn.vn_counter.quota =
2381                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2382
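        /* rate [Mbps = bits/usec] * period [usec] / 8 = bytes per period;
         * e.g. a 10000 Mbps vn with the 100 usec period set up in
         * bnx2x_init_port_minmax() gets a 125000 byte quota */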
2383         if (bp->vn_weight_sum) {
2384                 /* credit for each period of the fairness algorithm:
2385                    number of bytes in T_FAIR (the vns share the port rate).
2386                    vn_weight_sum should not be larger than 10000, thus
2387                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2388                    than zero */
2389                 m_fair_vn.vn_credit_delta =
2390                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2391                                                  (8 * bp->vn_weight_sum))),
2392                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2393                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2394                    m_fair_vn.vn_credit_delta);
2395         }
2396
2397         /* Store it to internal memory */
2398         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2399                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2400                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2401                        ((u32 *)(&m_rs_vn))[i]);
2402
2403         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2404                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2405                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2406                        ((u32 *)(&m_fair_vn))[i]);
2407 }
2408
2409
2410 /* This function is called upon link interrupt */
2411 static void bnx2x_link_attn(struct bnx2x *bp)
2412 {
2413         /* Make sure that we are synced with the current statistics */
2414         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2415
2416         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2417
2418         if (bp->link_vars.link_up) {
2419
2420                 /* dropless flow control */
2421                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2422                         int port = BP_PORT(bp);
2423                         u32 pause_enabled = 0;
2424
2425                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2426                                 pause_enabled = 1;
2427
2428                         REG_WR(bp, BAR_USTRORM_INTMEM +
2429                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2430                                pause_enabled);
2431                 }
2432
2433                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2434                         struct host_port_stats *pstats;
2435
2436                         pstats = bnx2x_sp(bp, port_stats);
2437                         /* reset old bmac stats */
2438                         memset(&(pstats->mac_stx[0]), 0,
2439                                sizeof(struct mac_stx));
2440                 }
2441                 if (bp->state == BNX2X_STATE_OPEN)
2442                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2443         }
2444
2445         /* indicate link status */
2446         bnx2x_link_report(bp);
2447
2448         if (IS_E1HMF(bp)) {
2449                 int port = BP_PORT(bp);
2450                 int func;
2451                 int vn;
2452
2453                 /* Set the attention towards other drivers on the same port */
2454                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2455                         if (vn == BP_E1HVN(bp))
2456                                 continue;
2457
2458                         func = ((vn << 1) | port);
2459                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2460                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2461                 }
2462
2463                 if (bp->link_vars.link_up) {
2464                         int i;
2465
2466                         /* Init rate shaping and fairness contexts */
2467                         bnx2x_init_port_minmax(bp);
2468
2469                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2470                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2471
2472                         /* Store it to internal memory */
2473                         for (i = 0;
2474                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2475                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2476                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2477                                        ((u32 *)(&bp->cmng))[i]);
2478                 }
2479         }
2480 }
2481
2482 static void bnx2x__link_status_update(struct bnx2x *bp)
2483 {
2484         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2485                 return;
2486
2487         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2488
2489         if (bp->link_vars.link_up)
2490                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491         else
2492                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2493
2494         bnx2x_calc_vn_weight_sum(bp);
2495
2496         /* indicate link status */
2497         bnx2x_link_report(bp);
2498 }
2499
2500 static void bnx2x_pmf_update(struct bnx2x *bp)
2501 {
2502         int port = BP_PORT(bp);
2503         u32 val;
2504
2505         bp->port.pmf = 1;
2506         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2507
2508         /* enable nig attention */
2509         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2510         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2511         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2512
2513         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2514 }
2515
2516 /* end of Link */
2517
2518 /* slow path */
2519
2520 /*
2521  * General service functions
2522  */
2523
2524 /* send the MCP a request, block until there is a reply */
2525 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2526 {
2527         int func = BP_FUNC(bp);
2528         u32 seq = ++bp->fw_seq;
2529         u32 rc = 0;
2530         u32 cnt = 1;
2531         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2532
2533         mutex_lock(&bp->fw_mb_mutex);
2534         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2535         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2536
2537         do {
2538                 /* let the FW do its magic ... */
2539                 msleep(delay);
2540
2541                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2542
2543                 /* Give the FW up to 5 seconds (500 * 10 ms) */
2544         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2545
2546         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2547            cnt*delay, rc, seq);
2548
2549         /* is this a reply to our command? */
2550         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2551                 rc &= FW_MSG_CODE_MASK;
2552         else {
2553                 /* FW BUG! */
2554                 BNX2X_ERR("FW failed to respond!\n");
2555                 bnx2x_fw_dump(bp);
2556                 rc = 0;
2557         }
2558         mutex_unlock(&bp->fw_mb_mutex);
2559
2560         return rc;
2561 }
2562
2563 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2564 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2565 static void bnx2x_set_rx_mode(struct net_device *dev);
2566
2567 static void bnx2x_e1h_disable(struct bnx2x *bp)
2568 {
2569         int port = BP_PORT(bp);
2570
2571         netif_tx_disable(bp->dev);
2572
2573         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2574
2575         netif_carrier_off(bp->dev);
2576 }
2577
2578 static void bnx2x_e1h_enable(struct bnx2x *bp)
2579 {
2580         int port = BP_PORT(bp);
2581
2582         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2583
2584         /* Tx queues need only be re-enabled */
2585         netif_tx_wake_all_queues(bp->dev);
2586
2587         /*
2588          * Do not call netif_carrier_on here; it will be called by the link
2589          * state check if the link is up
2590          */
2591 }
2592
2593 static void bnx2x_update_min_max(struct bnx2x *bp)
2594 {
2595         int port = BP_PORT(bp);
2596         int vn, i;
2597
2598         /* Init rate shaping and fairness contexts */
2599         bnx2x_init_port_minmax(bp);
2600
2601         bnx2x_calc_vn_weight_sum(bp);
2602
2603         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2604                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2605
2606         if (bp->port.pmf) {
2607                 int func;
2608
2609                 /* Set the attention towards other drivers on the same port */
2610                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2611                         if (vn == BP_E1HVN(bp))
2612                                 continue;
2613
2614                         func = ((vn << 1) | port);
2615                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2616                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2617                 }
2618
2619                 /* Store it to internal memory */
2620                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2621                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2622                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2623                                ((u32 *)(&bp->cmng))[i]);
2624         }
2625 }
2626
2627 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2628 {
2629         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2630
2631         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2632
2633                 /*
2634                  * This is the only place besides the function initialization
2635                  * where bp->flags can change, so it is done without any
2636                  * locks
2637                  */
2638                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2639                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2640                         bp->flags |= MF_FUNC_DIS;
2641
2642                         bnx2x_e1h_disable(bp);
2643                 } else {
2644                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2645                         bp->flags &= ~MF_FUNC_DIS;
2646
2647                         bnx2x_e1h_enable(bp);
2648                 }
2649                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2650         }
2651         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2652
2653                 bnx2x_update_min_max(bp);
2654                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2655         }
2656
2657         /* Report results to MCP */
2658         if (dcc_event)
2659                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2660         else
2661                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2662 }
2663
2664 /* must be called under the spq lock */
2665 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2666 {
2667         struct eth_spe *next_spe = bp->spq_prod_bd;
2668
2669         if (bp->spq_prod_bd == bp->spq_last_bd) {
2670                 bp->spq_prod_bd = bp->spq;
2671                 bp->spq_prod_idx = 0;
2672                 DP(NETIF_MSG_TIMER, "end of spq\n");
2673         } else {
2674                 bp->spq_prod_bd++;
2675                 bp->spq_prod_idx++;
2676         }
2677         return next_spe;
2678 }
2679
2680 /* must be called under the spq lock */
2681 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2682 {
2683         int func = BP_FUNC(bp);
2684
2685         /* Make sure that BD data is updated before writing the producer */
2686         wmb();
2687
2688         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2689                bp->spq_prod_idx);
2690         mmiowb();
2691 }
2692
2693 /* the slow path queue is odd since completions arrive on the fastpath ring */
2694 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2695                          u32 data_hi, u32 data_lo, int common)
2696 {
2697         struct eth_spe *spe;
2698
2699         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2700            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2701            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2702            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2703            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2704
2705 #ifdef BNX2X_STOP_ON_ERROR
2706         if (unlikely(bp->panic))
2707                 return -EIO;
2708 #endif
2709
2710         spin_lock_bh(&bp->spq_lock);
2711
2712         if (!bp->spq_left) {
2713                 BNX2X_ERR("BUG! SPQ ring full!\n");
2714                 spin_unlock_bh(&bp->spq_lock);
2715                 bnx2x_panic();
2716                 return -EBUSY;
2717         }
2718
2719         spe = bnx2x_sp_get_next(bp);
2720
2721         /* CID needs the port number encoded in it */
2722         spe->hdr.conn_and_cmd_data =
2723                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2724                                      HW_CID(bp, cid)));
2725         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2726         if (common)
2727                 spe->hdr.type |=
2728                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2729
2730         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2731         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2732
2733         bp->spq_left--;
2734
2735         bnx2x_sp_prod_update(bp);
2736         spin_unlock_bh(&bp->spq_lock);
2737         return 0;
2738 }
2739
2740 /* acquire split MCP access lock register */
2741 static int bnx2x_acquire_alr(struct bnx2x *bp)
2742 {
2743         u32 i, j, val;
2744         int rc = 0;
2745
2746         might_sleep();
2747         i = 100;
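        /* poll up to i*10 = 1000 times, 5 ms apart: ~5 seconds total */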
2748         for (j = 0; j < i*10; j++) {
2749                 val = (1UL << 31);
2750                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2751                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2752                 if (val & (1L << 31))
2753                         break;
2754
2755                 msleep(5);
2756         }
2757         if (!(val & (1L << 31))) {
2758                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2759                 rc = -EBUSY;
2760         }
2761
2762         return rc;
2763 }
2764
2765 /* release split MCP access lock register */
2766 static void bnx2x_release_alr(struct bnx2x *bp)
2767 {
2768         u32 val = 0;
2769
2770         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2771 }
2772
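/* Compare the cached default status block indices against the ones the
 * chip just wrote, cache the new values and return a bitmask of which
 * indices changed (bit 0 - attention, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM).
 */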
2773 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2774 {
2775         struct host_def_status_block *def_sb = bp->def_status_blk;
2776         u16 rc = 0;
2777
2778         barrier(); /* status block is written to by the chip */
2779         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2780                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2781                 rc |= 1;
2782         }
2783         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2784                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2785                 rc |= 2;
2786         }
2787         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2788                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2789                 rc |= 4;
2790         }
2791         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2792                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2793                 rc |= 8;
2794         }
2795         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2796                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2797                 rc |= 16;
2798         }
2799         return rc;
2800 }
2801
2802 /*
2803  * slow path service functions
2804  */
2805
2806 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2807 {
2808         int port = BP_PORT(bp);
2809         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2810                        COMMAND_REG_ATTN_BITS_SET);
2811         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2812                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2813         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2814                                        NIG_REG_MASK_INTERRUPT_PORT0;
2815         u32 aeu_mask;
2816         u32 nig_mask = 0;
2817
2818         if (bp->attn_state & asserted)
2819                 BNX2X_ERR("IGU ERROR\n");
2820
2821         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2822         aeu_mask = REG_RD(bp, aeu_addr);
2823
2824         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2825            aeu_mask, asserted);
2826         aeu_mask &= ~(asserted & 0xff);
2827         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2828
2829         REG_WR(bp, aeu_addr, aeu_mask);
2830         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2831
2832         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2833         bp->attn_state |= asserted;
2834         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2835
2836         if (asserted & ATTN_HARD_WIRED_MASK) {
2837                 if (asserted & ATTN_NIG_FOR_FUNC) {
2838
2839                         bnx2x_acquire_phy_lock(bp);
2840
2841                         /* save nig interrupt mask */
2842                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2843                         REG_WR(bp, nig_int_mask_addr, 0);
2844
2845                         bnx2x_link_attn(bp);
2846
2847                         /* handle unicore attn? */
2848                 }
2849                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2850                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2851
2852                 if (asserted & GPIO_2_FUNC)
2853                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2854
2855                 if (asserted & GPIO_3_FUNC)
2856                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2857
2858                 if (asserted & GPIO_4_FUNC)
2859                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2860
2861                 if (port == 0) {
2862                         if (asserted & ATTN_GENERAL_ATTN_1) {
2863                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2864                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2865                         }
2866                         if (asserted & ATTN_GENERAL_ATTN_2) {
2867                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2868                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2869                         }
2870                         if (asserted & ATTN_GENERAL_ATTN_3) {
2871                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2872                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2873                         }
2874                 } else {
2875                         if (asserted & ATTN_GENERAL_ATTN_4) {
2876                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2877                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2878                         }
2879                         if (asserted & ATTN_GENERAL_ATTN_5) {
2880                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2881                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2882                         }
2883                         if (asserted & ATTN_GENERAL_ATTN_6) {
2884                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2885                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2886                         }
2887                 }
2888
2889         } /* if hardwired */
2890
2891         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2892            asserted, hc_addr);
2893         REG_WR(bp, hc_addr, asserted);
2894
2895         /* now set back the mask */
2896         if (asserted & ATTN_NIG_FOR_FUNC) {
2897                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2898                 bnx2x_release_phy_lock(bp);
2899         }
2900 }
2901
2902 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2903 {
2904         int port = BP_PORT(bp);
2905
2906         /* mark the failure */
2907         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2908         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2909         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2910                  bp->link_params.ext_phy_config);
2911
2912         /* log the failure */
2913         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2914                    "Please contact Dell Support for assistance.\n");
2915 }
2916
2917 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2918 {
2919         int port = BP_PORT(bp);
2920         int reg_offset;
2921         u32 val, swap_val, swap_override;
2922
2923         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2924                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2925
2926         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2927
2928                 val = REG_RD(bp, reg_offset);
2929                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2930                 REG_WR(bp, reg_offset, val);
2931
2932                 BNX2X_ERR("SPIO5 hw attention\n");
2933
2934                 /* Fan failure attention */
2935                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2936                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2937                         /* Low power mode is controlled by GPIO 2 */
2938                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2939                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2940                         /* The PHY reset is controlled by GPIO 1 */
2941                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2942                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2943                         break;
2944
2945                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2946                         /* The PHY reset is controlled by GPIO 1 */
2947                         /* fake the port number to cancel the swap done in
2948                            set_gpio() */
2949                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2950                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2951                         port = (swap_val && swap_override) ^ 1;
2952                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2953                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2954                         break;
2955
2956                 default:
2957                         break;
2958                 }
2959                 bnx2x_fan_failure(bp);
2960         }
2961
2962         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2963                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2964                 bnx2x_acquire_phy_lock(bp);
2965                 bnx2x_handle_module_detect_int(&bp->link_params);
2966                 bnx2x_release_phy_lock(bp);
2967         }
2968
2969         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2970
2971                 val = REG_RD(bp, reg_offset);
2972                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2973                 REG_WR(bp, reg_offset, val);
2974
2975                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2976                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2977                 bnx2x_panic();
2978         }
2979 }
2980
2981 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2982 {
2983         u32 val;
2984
2985         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2986
2987                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2988                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2989                 /* DORQ discard attention */
2990                 if (val & 0x2)
2991                         BNX2X_ERR("FATAL error from DORQ\n");
2992         }
2993
2994         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2995
2996                 int port = BP_PORT(bp);
2997                 int reg_offset;
2998
2999                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3000                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3001
3002                 val = REG_RD(bp, reg_offset);
3003                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3004                 REG_WR(bp, reg_offset, val);
3005
3006                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3007                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3008                 bnx2x_panic();
3009         }
3010 }
3011
3012 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3013 {
3014         u32 val;
3015
3016         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3017
3018                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3019                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3020                 /* CFC error attention */
3021                 if (val & 0x2)
3022                         BNX2X_ERR("FATAL error from CFC\n");
3023         }
3024
3025         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3026
3027                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3028                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3029                 /* RQ_USDMDP_FIFO_OVERFLOW */
3030                 if (val & 0x18000)
3031                         BNX2X_ERR("FATAL error from PXP\n");
3032         }
3033
3034         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3035
3036                 int port = BP_PORT(bp);
3037                 int reg_offset;
3038
3039                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3040                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3041
3042                 val = REG_RD(bp, reg_offset);
3043                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3044                 REG_WR(bp, reg_offset, val);
3045
3046                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3047                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3048                 bnx2x_panic();
3049         }
3050 }
3051
3052 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3053 {
3054         u32 val;
3055
3056         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3057
3058                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3059                         int func = BP_FUNC(bp);
3060
3061                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3062                         bp->mf_config = SHMEM_RD(bp,
3063                                            mf_cfg.func_mf_config[func].config);
3064                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3065                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3066                                 bnx2x_dcc_event(bp,
3067                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3068                         bnx2x__link_status_update(bp);
3069                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3070                                 bnx2x_pmf_update(bp);
3071
3072                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3073
3074                         BNX2X_ERR("MC assert!\n");
3075                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3076                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3077                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3078                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3079                         bnx2x_panic();
3080
3081                 } else if (attn & BNX2X_MCP_ASSERT) {
3082
3083                         BNX2X_ERR("MCP assert!\n");
3084                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3085                         bnx2x_fw_dump(bp);
3086
3087                 } else
3088                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3089         }
3090
3091         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3092                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3093                 if (attn & BNX2X_GRC_TIMEOUT) {
3094                         val = CHIP_IS_E1H(bp) ?
3095                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3096                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3097                 }
3098                 if (attn & BNX2X_GRC_RSV) {
3099                         val = CHIP_IS_E1H(bp) ?
3100                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3101                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3102                 }
3103                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3104         }
3105 }
3106
3107 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3108 {
3109         struct attn_route attn;
3110         struct attn_route group_mask;
3111         int port = BP_PORT(bp);
3112         int index;
3113         u32 reg_addr;
3114         u32 val;
3115         u32 aeu_mask;
3116
3117         /* need to take HW lock because MCP or other port might also
3118            try to handle this event */
3119         bnx2x_acquire_alr(bp);
3120
3121         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3122         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3123         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3124         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3125         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3126            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3127
3128         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3129                 if (deasserted & (1 << index)) {
3130                         group_mask = bp->attn_group[index];
3131
3132                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3133                            index, group_mask.sig[0], group_mask.sig[1],
3134                            group_mask.sig[2], group_mask.sig[3]);
3135
3136                         bnx2x_attn_int_deasserted3(bp,
3137                                         attn.sig[3] & group_mask.sig[3]);
3138                         bnx2x_attn_int_deasserted1(bp,
3139                                         attn.sig[1] & group_mask.sig[1]);
3140                         bnx2x_attn_int_deasserted2(bp,
3141                                         attn.sig[2] & group_mask.sig[2]);
3142                         bnx2x_attn_int_deasserted0(bp,
3143                                         attn.sig[0] & group_mask.sig[0]);
3144
3145                         if ((attn.sig[0] & group_mask.sig[0] &
3146                                                 HW_PRTY_ASSERT_SET_0) ||
3147                             (attn.sig[1] & group_mask.sig[1] &
3148                                                 HW_PRTY_ASSERT_SET_1) ||
3149                             (attn.sig[2] & group_mask.sig[2] &
3150                                                 HW_PRTY_ASSERT_SET_2))
3151                                 BNX2X_ERR("FATAL HW block parity attention\n");
3152                 }
3153         }
3154
3155         bnx2x_release_alr(bp);
3156
3157         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3158
3159         val = ~deasserted;
3160         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3161            val, reg_addr);
3162         REG_WR(bp, reg_addr, val);
3163
3164         if (~bp->attn_state & deasserted)
3165                 BNX2X_ERR("IGU ERROR\n");
3166
3167         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3168                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3169
3170         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3171         aeu_mask = REG_RD(bp, reg_addr);
3172
3173         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3174            aeu_mask, deasserted);
3175         aeu_mask |= (deasserted & 0xff);
3176         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3177
3178         REG_WR(bp, reg_addr, aeu_mask);
3179         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3180
3181         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3182         bp->attn_state &= ~deasserted;
3183         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3184 }
3185
3186 static void bnx2x_attn_int(struct bnx2x *bp)
3187 {
3188         /* read local copy of bits */
3189         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3190                                                                 attn_bits);
3191         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3192                                                                 attn_bits_ack);
3193         u32 attn_state = bp->attn_state;
3194
3195         /* look for changed bits */
3196         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3197         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3198
3199         DP(NETIF_MSG_HW,
3200            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3201            attn_bits, attn_ack, asserted, deasserted);
3202
3203         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3204                 BNX2X_ERR("BAD attention state\n");
3205
3206         /* handle bits that were raised */
3207         if (asserted)
3208                 bnx2x_attn_int_asserted(bp, asserted);
3209
3210         if (deasserted)
3211                 bnx2x_attn_int_deasserted(bp, deasserted);
3212 }
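/*
 * Worked example for the edge detection in bnx2x_attn_int(), with made-up
 * values attn_bits = 0x5, attn_ack = 0x1, attn_state = 0x1:
 *
 *      asserted   =  0x5 & ~0x1 & ~0x1 = 0x4   (bit 2 newly raised)
 *      deasserted = ~0x5 &  0x1 &  0x1 = 0x0   (nothing cleared yet)
 *
 * A bit counts as asserted only once it appears in attn_bits while not
 * yet acked or tracked in attn_state, and as deasserted only once it
 * drops from attn_bits while still acked and tracked.
 */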
3213
3214 static void bnx2x_sp_task(struct work_struct *work)
3215 {
3216         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3217         u16 status;
3218
3219
3220         /* Return here if interrupt is disabled */
3221         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3222                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3223                 return;
3224         }
3225
3226         status = bnx2x_update_dsb_idx(bp);
3227 /*      if (status == 0)                                     */
3228 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3229
3230         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3231
3232         /* HW attentions */
3233         if (status & 0x1)
3234                 bnx2x_attn_int(bp);
3235
3236         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3237                      IGU_INT_NOP, 1);
3238         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3239                      IGU_INT_NOP, 1);
3240         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3241                      IGU_INT_NOP, 1);
3242         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3243                      IGU_INT_NOP, 1);
3244         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3245                      IGU_INT_ENABLE, 1);
3246
3247 }
3248
3249 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3250 {
3251         struct net_device *dev = dev_instance;
3252         struct bnx2x *bp = netdev_priv(dev);
3253
3254         /* Return here if interrupt is disabled */
3255         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3256                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3257                 return IRQ_HANDLED;
3258         }
3259
3260         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3261
3262 #ifdef BNX2X_STOP_ON_ERROR
3263         if (unlikely(bp->panic))
3264                 return IRQ_HANDLED;
3265 #endif
3266
3267 #ifdef BCM_CNIC
3268         {
3269                 struct cnic_ops *c_ops;
3270
3271                 rcu_read_lock();
3272                 c_ops = rcu_dereference(bp->cnic_ops);
3273                 if (c_ops)
3274                         c_ops->cnic_handler(bp->cnic_data, NULL);
3275                 rcu_read_unlock();
3276         }
3277 #endif
3278         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3279
3280         return IRQ_HANDLED;
3281 }
3282
3283 /* end of slow path */
3284
3285 /* Statistics */
3286
3287 /****************************************************************************
3288 * Macros
3289 ****************************************************************************/
3290
3291 /* sum[hi:lo] += add[hi:lo] */
3292 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3293         do { \
3294                 s_lo += a_lo; \
3295                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3296         } while (0)
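/*
 * ADD_64 emulates a 64-bit add on split hi/lo words; the "(s_lo < a_lo)"
 * test detects 32-bit wrap-around.  For example (made-up values), adding
 * a = 0x0:0x1 to s = 0x1:0xffffffff wraps s_lo to 0, which is below a_lo,
 * so the carry bumps s_hi and the result is 0x2:0x0.
 */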
3297
3298 /* difference = minuend - subtrahend */
3299 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3300         do { \
3301                 if (m_lo < s_lo) { \
3302                         /* underflow */ \
3303                         d_hi = m_hi - s_hi; \
3304                         if (d_hi > 0) { \
3305                                 /* we can borrow 1 */ \
3306                                 d_hi--; \
3307                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3308                         } else { \
3309                                 /* m_hi <= s_hi */ \
3310                                 d_hi = 0; \
3311                                 d_lo = 0; \
3312                         } \
3313                 } else { \
3314                         /* m_lo >= s_lo */ \
3315                         if (m_hi < s_hi) { \
3316                                 d_hi = 0; \
3317                                 d_lo = 0; \
3318                         } else { \
3319                                 /* m_hi >= s_hi */ \
3320                                 d_hi = m_hi - s_hi; \
3321                                 d_lo = m_lo - s_lo; \
3322                         } \
3323                 } \
3324         } while (0)
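/*
 * DIFF_64 is a saturating 64-bit subtract on hi/lo pairs.  When the low
 * words would underflow it borrows 1 from the high word; note that
 * "m_lo + (UINT_MAX - s_lo) + 1" equals m_lo - s_lo modulo 2^32.  A
 * result that would go negative overall is clamped to 0:0 rather than
 * wrapped, presumably so a counter that was reset to zero produces 0
 * instead of a huge bogus delta.
 */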
3325
3326 #define UPDATE_STAT64(s, t) \
3327         do { \
3328                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3329                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3330                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3331                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3332                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3333                        pstats->mac_stx[1].t##_lo, diff.lo); \
3334         } while (0)
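/*
 * UPDATE_STAT64 keeps two copies of every counter: mac_stx[0] holds the
 * raw MAC snapshot from the previous DMAE read and mac_stx[1] the running
 * 64-bit accumulation.  It takes the delta against [0], refreshes [0]
 * with the new snapshot, then folds the delta into [1].
 */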
3335
3336 #define UPDATE_STAT64_NIG(s, t) \
3337         do { \
3338                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3339                         diff.lo, new->s##_lo, old->s##_lo); \
3340                 ADD_64(estats->t##_hi, diff.hi, \
3341                        estats->t##_lo, diff.lo); \
3342         } while (0)
3343
3344 /* sum[hi:lo] += add */
3345 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3346         do { \
3347                 s_lo += a; \
3348                 s_hi += (s_lo < a) ? 1 : 0; \
3349         } while (0)
3350
3351 #define UPDATE_EXTEND_STAT(s) \
3352         do { \
3353                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3354                               pstats->mac_stx[1].s##_lo, \
3355                               new->s); \
3356         } while (0)
3357
3358 #define UPDATE_EXTEND_TSTAT(s, t) \
3359         do { \
3360                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3361                 old_tclient->s = tclient->s; \
3362                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3363         } while (0)
3364
3365 #define UPDATE_EXTEND_USTAT(s, t) \
3366         do { \
3367                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3368                 old_uclient->s = uclient->s; \
3369                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3370         } while (0)
3371
3372 #define UPDATE_EXTEND_XSTAT(s, t) \
3373         do { \
3374                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3375                 old_xclient->s = xclient->s; \
3376                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3377         } while (0)
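/*
 * The three UPDATE_EXTEND_*STAT variants above share one pattern: take
 * the delta between the current and previous little-endian 32-bit storm
 * counters, refresh the old snapshot, and extend the delta into the
 * 64-bit hi/lo accumulator in qstats.
 */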
3378
3379 /* minuend -= subtrahend */
3380 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3381         do { \
3382                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3383         } while (0)
3384
3385 /* minuend[hi:lo] -= subtrahend */
3386 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3387         do { \
3388                 SUB_64(m_hi, 0, m_lo, s); \
3389         } while (0)
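/*
 * SUB_EXTEND_64 subtracts a plain 32-bit value from a hi/lo pair by
 * passing s_hi = 0 through SUB_64, so it inherits DIFF_64's borrow and
 * clamp-to-zero behaviour.
 */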
3390
3391 #define SUB_EXTEND_USTAT(s, t) \
3392         do { \
3393                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3394                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3395         } while (0)
3396
3397 /*
3398  * General service functions
3399  */
3400
3401 static inline long bnx2x_hilo(u32 *hiref)
3402 {
3403         u32 lo = *(hiref + 1);
3404 #if (BITS_PER_LONG == 64)
3405         u32 hi = *hiref;
3406
3407         return HILO_U64(hi, lo);
3408 #else
3409         return lo;
3410 #endif
3411 }
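/*
 * bnx2x_hilo() folds a {hi, lo} pair of u32s (high word first in memory)
 * into a long.  On 64-bit builds HILO_U64() reconstructs the full value;
 * on 32-bit builds a long cannot hold it, so only the low word is
 * returned and the reported counter effectively truncates at 2^32.
 */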
3412
3413 /*
3414  * Init service functions
3415  */
3416
3417 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3418 {
3419         if (!bp->stats_pending) {
3420                 struct eth_query_ramrod_data ramrod_data = {0};
3421                 int i, rc;
3422
3423                 ramrod_data.drv_counter = bp->stats_counter++;
3424                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3425                 for_each_queue(bp, i)
3426                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3427
3428                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3429                                    ((u32 *)&ramrod_data)[1],
3430                                    ((u32 *)&ramrod_data)[0], 0);
3431                 if (rc == 0) {
3432                         /* stats ramrod has its own slot on the spq */
3433                         bp->spq_left++;
3434                         bp->stats_pending = 1;
3435                 }
3436         }
3437 }
3438
3439 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3440 {
3441         struct dmae_command *dmae = &bp->stats_dmae;
3442         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3443
3444         *stats_comp = DMAE_COMP_VAL;
3445         if (CHIP_REV_IS_SLOW(bp))
3446                 return;
3447
3448         /* loader */
3449         if (bp->executer_idx) {
3450                 int loader_idx = PMF_DMAE_C(bp);
3451
3452                 memset(dmae, 0, sizeof(struct dmae_command));
3453
3454                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3455                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3456                                 DMAE_CMD_DST_RESET |
3457 #ifdef __BIG_ENDIAN
3458                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3459 #else
3460                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3461 #endif
3462                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3463                                                DMAE_CMD_PORT_0) |
3464                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3465                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3466                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3467                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3468                                      sizeof(struct dmae_command) *
3469                                      (loader_idx + 1)) >> 2;
3470                 dmae->dst_addr_hi = 0;
3471                 dmae->len = sizeof(struct dmae_command) >> 2;
3472                 if (CHIP_IS_E1(bp))
3473                         dmae->len--;
3474                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3475                 dmae->comp_addr_hi = 0;
3476                 dmae->comp_val = 1;
3477
3478                 *stats_comp = 0;
3479                 bnx2x_post_dmae(bp, dmae, loader_idx);
3480
3481         } else if (bp->func_stx) {
3482                 *stats_comp = 0;
3483                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3484         }
3485 }
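/*
 * Rough sketch of the loader path above, inferred from the code rather
 * than from a hardware spec: when several DMAE commands have been queued
 * (bp->executer_idx != 0), the loader command copies the first queued
 * command from host memory into the DMAE engine's command memory at slot
 * loader_idx + 1, and its completion write to dmae_reg_go_c[loader_idx+1]
 * fires that slot, starting execution of the queued chain.
 */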
3486
3487 static int bnx2x_stats_comp(struct bnx2x *bp)
3488 {
3489         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3490         int cnt = 10;
3491
3492         might_sleep();
3493         while (*stats_comp != DMAE_COMP_VAL) {
3494                 if (!cnt) {
3495                         BNX2X_ERR("timeout waiting for stats to finish\n");
3496                         break;
3497                 }
3498                 cnt--;
3499                 msleep(1);
3500         }
3501         return 1;
3502 }
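/*
 * Note that bnx2x_stats_comp() returns 1 unconditionally, even on the
 * timeout path above; its callers in this file treat completion as
 * best-effort and ignore the return value.
 */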
3503
3504 /*
3505  * Statistics service functions
3506  */
3507
3508 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3509 {
3510         struct dmae_command *dmae;
3511         u32 opcode;
3512         int loader_idx = PMF_DMAE_C(bp);
3513         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3514
3515         /* sanity */
3516         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3517                 BNX2X_ERR("BUG!\n");
3518                 return;
3519         }
3520
3521         bp->executer_idx = 0;
3522
3523         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3524                   DMAE_CMD_C_ENABLE |
3525                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3526 #ifdef __BIG_ENDIAN
3527                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3528 #else
3529                   DMAE_CMD_ENDIANITY_DW_SWAP |
3530 #endif
3531                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3532                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3533
3534         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3535         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3536         dmae->src_addr_lo = bp->port.port_stx >> 2;
3537         dmae->src_addr_hi = 0;
3538         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3539         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3540         dmae->len = DMAE_LEN32_RD_MAX;
3541         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3542         dmae->comp_addr_hi = 0;
3543         dmae->comp_val = 1;
3544
3545         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3546         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3547         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3548         dmae->src_addr_hi = 0;
3549         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3550                                    DMAE_LEN32_RD_MAX * 4);
3551         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3552                                    DMAE_LEN32_RD_MAX * 4);
3553         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3554         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3555         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3556         dmae->comp_val = DMAE_COMP_VAL;
3557
3558         *stats_comp = 0;
3559         bnx2x_hw_stats_post(bp);
3560         bnx2x_stats_comp(bp);
3561 }
3562
3563 static void bnx2x_port_stats_init(struct bnx2x *bp)
3564 {
3565         struct dmae_command *dmae;
3566         int port = BP_PORT(bp);
3567         int vn = BP_E1HVN(bp);
3568         u32 opcode;
3569         int loader_idx = PMF_DMAE_C(bp);
3570         u32 mac_addr;
3571         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3572
3573         /* sanity */
3574         if (!bp->link_vars.link_up || !bp->port.pmf) {
3575                 BNX2X_ERR("BUG!\n");
3576                 return;
3577         }
3578
3579         bp->executer_idx = 0;
3580
3581         /* MCP */
3582         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3583                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3584                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3585 #ifdef __BIG_ENDIAN
3586                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3587 #else
3588                   DMAE_CMD_ENDIANITY_DW_SWAP |
3589 #endif
3590                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3591                   (vn << DMAE_CMD_E1HVN_SHIFT));
3592
3593         if (bp->port.port_stx) {
3594
3595                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3596                 dmae->opcode = opcode;
3597                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3598                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3599                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3600                 dmae->dst_addr_hi = 0;
3601                 dmae->len = sizeof(struct host_port_stats) >> 2;
3602                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3603                 dmae->comp_addr_hi = 0;
3604                 dmae->comp_val = 1;
3605         }
3606
3607         if (bp->func_stx) {
3608
3609                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3610                 dmae->opcode = opcode;
3611                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3612                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3613                 dmae->dst_addr_lo = bp->func_stx >> 2;
3614                 dmae->dst_addr_hi = 0;
3615                 dmae->len = sizeof(struct host_func_stats) >> 2;
3616                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3617                 dmae->comp_addr_hi = 0;
3618                 dmae->comp_val = 1;
3619         }
3620
3621         /* MAC */
3622         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3623                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3624                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3625 #ifdef __BIG_ENDIAN
3626                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3627 #else
3628                   DMAE_CMD_ENDIANITY_DW_SWAP |
3629 #endif
3630                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3631                   (vn << DMAE_CMD_E1HVN_SHIFT));
3632
3633         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3634
3635                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3636                                    NIG_REG_INGRESS_BMAC0_MEM);
3637
3638                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3639                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3640                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3641                 dmae->opcode = opcode;
3642                 dmae->src_addr_lo = (mac_addr +
3643                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3644                 dmae->src_addr_hi = 0;
3645                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3646                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3647                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3648                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3649                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3650                 dmae->comp_addr_hi = 0;
3651                 dmae->comp_val = 1;
3652
3653                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3654                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3655                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3656                 dmae->opcode = opcode;
3657                 dmae->src_addr_lo = (mac_addr +
3658                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3659                 dmae->src_addr_hi = 0;
3660                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3661                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3662                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3663                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3664                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3665                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3666                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3667                 dmae->comp_addr_hi = 0;
3668                 dmae->comp_val = 1;
3669
3670         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3671
3672                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3673
3674                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3675                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3676                 dmae->opcode = opcode;
3677                 dmae->src_addr_lo = (mac_addr +
3678                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3679                 dmae->src_addr_hi = 0;
3680                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3681                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3682                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3683                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3684                 dmae->comp_addr_hi = 0;
3685                 dmae->comp_val = 1;
3686
3687                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3688                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689                 dmae->opcode = opcode;
3690                 dmae->src_addr_lo = (mac_addr +
3691                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3692                 dmae->src_addr_hi = 0;
3693                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3694                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3695                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3696                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3697                 dmae->len = 1;
3698                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3699                 dmae->comp_addr_hi = 0;
3700                 dmae->comp_val = 1;
3701
3702                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3703                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3704                 dmae->opcode = opcode;
3705                 dmae->src_addr_lo = (mac_addr +
3706                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3707                 dmae->src_addr_hi = 0;
3708                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3709                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3710                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3711                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3712                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3713                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3714                 dmae->comp_addr_hi = 0;
3715                 dmae->comp_val = 1;
3716         }
3717
3718         /* NIG */
3719         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3720         dmae->opcode = opcode;
3721         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3722                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3723         dmae->src_addr_hi = 0;
3724         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3725         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3726         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3727         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3728         dmae->comp_addr_hi = 0;
3729         dmae->comp_val = 1;
3730
3731         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3732         dmae->opcode = opcode;
3733         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3734                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3735         dmae->src_addr_hi = 0;
3736         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3737                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3738         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3739                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3740         dmae->len = (2*sizeof(u32)) >> 2;
3741         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3742         dmae->comp_addr_hi = 0;
3743         dmae->comp_val = 1;
3744
3745         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3746         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3747                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3748                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3749 #ifdef __BIG_ENDIAN
3750                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3751 #else
3752                         DMAE_CMD_ENDIANITY_DW_SWAP |
3753 #endif
3754                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3755                         (vn << DMAE_CMD_E1HVN_SHIFT));
3756         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3757                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3758         dmae->src_addr_hi = 0;
3759         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3760                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3761         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3762                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3763         dmae->len = (2*sizeof(u32)) >> 2;
3764         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3765         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3766         dmae->comp_val = DMAE_COMP_VAL;
3767
3768         *stats_comp = 0;
3769 }
3770
3771 static void bnx2x_func_stats_init(struct bnx2x *bp)
3772 {
3773         struct dmae_command *dmae = &bp->stats_dmae;
3774         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3775
3776         /* sanity */
3777         if (!bp->func_stx) {
3778                 BNX2X_ERR("BUG!\n");
3779                 return;
3780         }
3781
3782         bp->executer_idx = 0;
3783         memset(dmae, 0, sizeof(struct dmae_command));
3784
3785         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3786                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3787                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3788 #ifdef __BIG_ENDIAN
3789                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3790 #else
3791                         DMAE_CMD_ENDIANITY_DW_SWAP |
3792 #endif
3793                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3794                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3795         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3796         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3797         dmae->dst_addr_lo = bp->func_stx >> 2;
3798         dmae->dst_addr_hi = 0;
3799         dmae->len = sizeof(struct host_func_stats) >> 2;
3800         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3801         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3802         dmae->comp_val = DMAE_COMP_VAL;
3803
3804         *stats_comp = 0;
3805 }
3806
3807 static void bnx2x_stats_start(struct bnx2x *bp)
3808 {
3809         if (bp->port.pmf)
3810                 bnx2x_port_stats_init(bp);
3811
3812         else if (bp->func_stx)
3813                 bnx2x_func_stats_init(bp);
3814
3815         bnx2x_hw_stats_post(bp);
3816         bnx2x_storm_stats_post(bp);
3817 }
3818
3819 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3820 {
3821         bnx2x_stats_comp(bp);
3822         bnx2x_stats_pmf_update(bp);
3823         bnx2x_stats_start(bp);
3824 }
3825
3826 static void bnx2x_stats_restart(struct bnx2x *bp)
3827 {
3828         bnx2x_stats_comp(bp);
3829         bnx2x_stats_start(bp);
3830 }
3831
3832 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3833 {
3834         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3835         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3836         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3837         struct {
3838                 u32 lo;
3839                 u32 hi;
3840         } diff;
3841
3842         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3843         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3844         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3845         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3846         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3847         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3848         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3849         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3850         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3851         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3852         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3853         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3854         UPDATE_STAT64(tx_stat_gt127,
3855                                 tx_stat_etherstatspkts65octetsto127octets);
3856         UPDATE_STAT64(tx_stat_gt255,
3857                                 tx_stat_etherstatspkts128octetsto255octets);
3858         UPDATE_STAT64(tx_stat_gt511,
3859                                 tx_stat_etherstatspkts256octetsto511octets);
3860         UPDATE_STAT64(tx_stat_gt1023,
3861                                 tx_stat_etherstatspkts512octetsto1023octets);
3862         UPDATE_STAT64(tx_stat_gt1518,
3863                                 tx_stat_etherstatspkts1024octetsto1522octets);
3864         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3865         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3866         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3867         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3868         UPDATE_STAT64(tx_stat_gterr,
3869                                 tx_stat_dot3statsinternalmactransmiterrors);
3870         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3871
3872         estats->pause_frames_received_hi =
3873                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3874         estats->pause_frames_received_lo =
3875                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3876
3877         estats->pause_frames_sent_hi =
3878                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3879         estats->pause_frames_sent_lo =
3880                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3881 }
3882
3883 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3884 {
3885         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3886         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3887         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3888
3889         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3890         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3891         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3892         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3893         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3894         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3895         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3896         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3897         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3898         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3899         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3900         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3901         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3902         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3903         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3904         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3905         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3907         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3908         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3909         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3910         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3911         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3912         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3913         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3914         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3915         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3916         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3917         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3918         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3919         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3920
3921         estats->pause_frames_received_hi =
3922                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3923         estats->pause_frames_received_lo =
3924                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3925         ADD_64(estats->pause_frames_received_hi,
3926                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3927                estats->pause_frames_received_lo,
3928                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3929
3930         estats->pause_frames_sent_hi =
3931                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3932         estats->pause_frames_sent_lo =
3933                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3934         ADD_64(estats->pause_frames_sent_hi,
3935                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3936                estats->pause_frames_sent_lo,
3937                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3938 }
3939
3940 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3941 {
3942         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3943         struct nig_stats *old = &(bp->port.old_nig_stats);
3944         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3945         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3946         struct {
3947                 u32 lo;
3948                 u32 hi;
3949         } diff;
3950         u32 nig_timer_max;
3951
3952         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3953                 bnx2x_bmac_stats_update(bp);
3954
3955         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3956                 bnx2x_emac_stats_update(bp);
3957
3958         else { /* should not be reached */
3959                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3960                 return -1;
3961         }
3962
3963         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3964                       new->brb_discard - old->brb_discard);
3965         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3966                       new->brb_truncate - old->brb_truncate);
3967
3968         UPDATE_STAT64_NIG(egress_mac_pkt0,
3969                                         etherstatspkts1024octetsto1522octets);
3970         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3971
3972         memcpy(old, new, sizeof(struct nig_stats));
3973
3974         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3975                sizeof(struct mac_stx));
3976         estats->brb_drop_hi = pstats->brb_drop_hi;
3977         estats->brb_drop_lo = pstats->brb_drop_lo;
3978
3979         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3980
3981         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3982         if (nig_timer_max != estats->nig_timer_max) {
3983                 estats->nig_timer_max = nig_timer_max;
3984                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3985         }
3986
3987         return 0;
3988 }
3989
3990 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3991 {
3992         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3993         struct tstorm_per_port_stats *tport =
3994                                         &stats->tstorm_common.port_statistics;
3995         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3996         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3997         int i;
3998
3999         memcpy(&(fstats->total_bytes_received_hi),
4000                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4001                sizeof(struct host_func_stats) - 2*sizeof(u32));
4002         estats->error_bytes_received_hi = 0;
4003         estats->error_bytes_received_lo = 0;
4004         estats->etherstatsoverrsizepkts_hi = 0;
4005         estats->etherstatsoverrsizepkts_lo = 0;
4006         estats->no_buff_discard_hi = 0;
4007         estats->no_buff_discard_lo = 0;
4008
4009         for_each_queue(bp, i) {
4010                 struct bnx2x_fastpath *fp = &bp->fp[i];
4011                 int cl_id = fp->cl_id;
4012                 struct tstorm_per_client_stats *tclient =
4013                                 &stats->tstorm_common.client_statistics[cl_id];
4014                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4015                 struct ustorm_per_client_stats *uclient =
4016                                 &stats->ustorm_common.client_statistics[cl_id];
4017                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4018                 struct xstorm_per_client_stats *xclient =
4019                                 &stats->xstorm_common.client_statistics[cl_id];
4020                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4021                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4022                 u32 diff;
4023
4024                 /* are storm stats valid? */
4025                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4026                                                         bp->stats_counter) {
4027                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4028                            "  xstorm counter (%d) != stats_counter (%d)\n",
4029                            i, xclient->stats_counter, bp->stats_counter);
4030                         return -1;
4031                 }
4032                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4033                                                         bp->stats_counter) {
4034                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4035                            "  tstorm counter (%d) != stats_counter (%d)\n",
4036                            i, tclient->stats_counter, bp->stats_counter);
4037                         return -2;
4038                 }
4039                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4040                                                         bp->stats_counter) {
4041                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4042                            "  ustorm counter (%d) != stats_counter (%d)\n",
4043                            i, uclient->stats_counter, bp->stats_counter);
4044                         return -4;
4045                 }
4046
4047                 qstats->total_bytes_received_hi =
4048                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4049                 qstats->total_bytes_received_lo =
4050                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4051
4052                 ADD_64(qstats->total_bytes_received_hi,
4053                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4054                        qstats->total_bytes_received_lo,
4055                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4056
4057                 ADD_64(qstats->total_bytes_received_hi,
4058                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4059                        qstats->total_bytes_received_lo,
4060                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4061
4062                 qstats->valid_bytes_received_hi =
4063                                         qstats->total_bytes_received_hi;
4064                 qstats->valid_bytes_received_lo =
4065                                         qstats->total_bytes_received_lo;
4066
4067                 qstats->error_bytes_received_hi =
4068                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4069                 qstats->error_bytes_received_lo =
4070                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4071
4072                 ADD_64(qstats->total_bytes_received_hi,
4073                        qstats->error_bytes_received_hi,
4074                        qstats->total_bytes_received_lo,
4075                        qstats->error_bytes_received_lo);
4076
4077                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4078                                         total_unicast_packets_received);
4079                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4080                                         total_multicast_packets_received);
4081                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4082                                         total_broadcast_packets_received);
4083                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4084                                         etherstatsoverrsizepkts);
4085                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4086
4087                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4088                                         total_unicast_packets_received);
4089                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4090                                         total_multicast_packets_received);
4091                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4092                                         total_broadcast_packets_received);
4093                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4094                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4095                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4096
4097                 qstats->total_bytes_transmitted_hi =
4098                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4099                 qstats->total_bytes_transmitted_lo =
4100                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4101
4102                 ADD_64(qstats->total_bytes_transmitted_hi,
4103                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4104                        qstats->total_bytes_transmitted_lo,
4105                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4106
4107                 ADD_64(qstats->total_bytes_transmitted_hi,
4108                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4109                        qstats->total_bytes_transmitted_lo,
4110                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4111
4112                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4113                                         total_unicast_packets_transmitted);
4114                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4115                                         total_multicast_packets_transmitted);
4116                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4117                                         total_broadcast_packets_transmitted);
4118
4119                 old_tclient->checksum_discard = tclient->checksum_discard;
4120                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4121
4122                 ADD_64(fstats->total_bytes_received_hi,
4123                        qstats->total_bytes_received_hi,
4124                        fstats->total_bytes_received_lo,
4125                        qstats->total_bytes_received_lo);
4126                 ADD_64(fstats->total_bytes_transmitted_hi,
4127                        qstats->total_bytes_transmitted_hi,
4128                        fstats->total_bytes_transmitted_lo,
4129                        qstats->total_bytes_transmitted_lo);
4130                 ADD_64(fstats->total_unicast_packets_received_hi,
4131                        qstats->total_unicast_packets_received_hi,
4132                        fstats->total_unicast_packets_received_lo,
4133                        qstats->total_unicast_packets_received_lo);
4134                 ADD_64(fstats->total_multicast_packets_received_hi,
4135                        qstats->total_multicast_packets_received_hi,
4136                        fstats->total_multicast_packets_received_lo,
4137                        qstats->total_multicast_packets_received_lo);
4138                 ADD_64(fstats->total_broadcast_packets_received_hi,
4139                        qstats->total_broadcast_packets_received_hi,
4140                        fstats->total_broadcast_packets_received_lo,
4141                        qstats->total_broadcast_packets_received_lo);
4142                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4143                        qstats->total_unicast_packets_transmitted_hi,
4144                        fstats->total_unicast_packets_transmitted_lo,
4145                        qstats->total_unicast_packets_transmitted_lo);
4146                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4147                        qstats->total_multicast_packets_transmitted_hi,
4148                        fstats->total_multicast_packets_transmitted_lo,
4149                        qstats->total_multicast_packets_transmitted_lo);
4150                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4151                        qstats->total_broadcast_packets_transmitted_hi,
4152                        fstats->total_broadcast_packets_transmitted_lo,
4153                        qstats->total_broadcast_packets_transmitted_lo);
4154                 ADD_64(fstats->valid_bytes_received_hi,
4155                        qstats->valid_bytes_received_hi,
4156                        fstats->valid_bytes_received_lo,
4157                        qstats->valid_bytes_received_lo);
4158
4159                 ADD_64(estats->error_bytes_received_hi,
4160                        qstats->error_bytes_received_hi,
4161                        estats->error_bytes_received_lo,
4162                        qstats->error_bytes_received_lo);
4163                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4164                        qstats->etherstatsoverrsizepkts_hi,
4165                        estats->etherstatsoverrsizepkts_lo,
4166                        qstats->etherstatsoverrsizepkts_lo);
4167                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4168                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4169         }
4170
4171         ADD_64(fstats->total_bytes_received_hi,
4172                estats->rx_stat_ifhcinbadoctets_hi,
4173                fstats->total_bytes_received_lo,
4174                estats->rx_stat_ifhcinbadoctets_lo);
4175
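             /* struct bnx2x_eth_stats mirrors struct host_func_stats from
              * total_bytes_received_hi onwards, so copy everything except
              * the two u32 host_func_stats_start/end markers.
              */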
4176         memcpy(estats, &(fstats->total_bytes_received_hi),
4177                sizeof(struct host_func_stats) - 2*sizeof(u32));
4178
4179         ADD_64(estats->etherstatsoverrsizepkts_hi,
4180                estats->rx_stat_dot3statsframestoolong_hi,
4181                estats->etherstatsoverrsizepkts_lo,
4182                estats->rx_stat_dot3statsframestoolong_lo);
4183         ADD_64(estats->error_bytes_received_hi,
4184                estats->rx_stat_ifhcinbadoctets_hi,
4185                estats->error_bytes_received_lo,
4186                estats->rx_stat_ifhcinbadoctets_lo);
4187
4188         if (bp->port.pmf) {
4189                 estats->mac_filter_discard =
4190                                 le32_to_cpu(tport->mac_filter_discard);
4191                 estats->xxoverflow_discard =
4192                                 le32_to_cpu(tport->xxoverflow_discard);
4193                 estats->brb_truncate_discard =
4194                                 le32_to_cpu(tport->brb_truncate_discard);
4195                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4196         }
4197
4198         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4199
4200         bp->stats_pending = 0;
4201
4202         return 0;
4203 }
4204
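     /* Fold the accumulated firmware and MAC statistics into the standard
      * struct net_device_stats counters reported to the stack.  Each 64-bit
      * counter is a {hi, lo} u32 pair; bnx2x_hilo() (see bnx2x.h) flattens
      * it, in effect returning ((u64)hi << 32) | lo (only the low half fits
      * on 32-bit builds).
      */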
4205 static void bnx2x_net_stats_update(struct bnx2x *bp)
4206 {
4207         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4208         struct net_device_stats *nstats = &bp->dev->stats;
4209         int i;
4210
4211         nstats->rx_packets =
4212                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4213                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4214                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4215
4216         nstats->tx_packets =
4217                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4218                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4219                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4220
4221         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4222
4223         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4224
4225         nstats->rx_dropped = estats->mac_discard;
4226         for_each_queue(bp, i)
4227                 nstats->rx_dropped +=
4228                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4229
4230         nstats->tx_dropped = 0;
4231
4232         nstats->multicast =
4233                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4234
4235         nstats->collisions =
4236                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4237
4238         nstats->rx_length_errors =
4239                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4240                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4241         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4242                                  bnx2x_hilo(&estats->brb_truncate_hi);
4243         nstats->rx_crc_errors =
4244                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4245         nstats->rx_frame_errors =
4246                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4247         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4248         nstats->rx_missed_errors = estats->xxoverflow_discard;
4249
4250         nstats->rx_errors = nstats->rx_length_errors +
4251                             nstats->rx_over_errors +
4252                             nstats->rx_crc_errors +
4253                             nstats->rx_frame_errors +
4254                             nstats->rx_fifo_errors +
4255                             nstats->rx_missed_errors;
4256
4257         nstats->tx_aborted_errors =
4258                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4259                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4260         nstats->tx_carrier_errors =
4261                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4262         nstats->tx_fifo_errors = 0;
4263         nstats->tx_heartbeat_errors = 0;
4264         nstats->tx_window_errors = 0;
4265
4266         nstats->tx_errors = nstats->tx_aborted_errors +
4267                             nstats->tx_carrier_errors +
4268             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4269 }
4270
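     /* Software-only counters (Tx queue stops, Rx drops, allocation and
      * checksum failures) are kept per queue; rebuild the device-wide
      * totals from scratch on every update.
      */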
4271 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4272 {
4273         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4274         int i;
4275
4276         estats->driver_xoff = 0;
4277         estats->rx_err_discard_pkt = 0;
4278         estats->rx_skb_alloc_failed = 0;
4279         estats->hw_csum_err = 0;
4280         for_each_queue(bp, i) {
4281                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4282
4283                 estats->driver_xoff += qstats->driver_xoff;
4284                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4285                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4286                 estats->hw_csum_err += qstats->hw_csum_err;
4287         }
4288 }
4289
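     /* UPDATE action of the statistics state machine: runs only once the
      * previous DMAE transfer has completed (*stats_comp == DMAE_COMP_VAL),
      * refreshes HW, storm, net and driver stats, and re-posts the next
      * HW/storm requests at the end.  If the storms keep returning stale
      * counters, the driver gives up and panics.
      */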
4290 static void bnx2x_stats_update(struct bnx2x *bp)
4291 {
4292         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4293
4294         if (*stats_comp != DMAE_COMP_VAL)
4295                 return;
4296
4297         if (bp->port.pmf)
4298                 bnx2x_hw_stats_update(bp);
4299
4300         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4301                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4302                 bnx2x_panic();
4303                 return;
4304         }
4305
4306         bnx2x_net_stats_update(bp);
4307         bnx2x_drv_stats_update(bp);
4308
4309         if (netif_msg_timer(bp)) {
4310                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4311                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4312                 struct tstorm_per_client_stats *old_tclient =
4313                                                         &bp->fp->old_tclient;
4314                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4315                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4316                 struct net_device_stats *nstats = &bp->dev->stats;
4317                 int i;
4318
4319                 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4320                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4321                                   "  tx pkt (%lx)\n",
4322                        bnx2x_tx_avail(fp0_tx),
4323                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4324                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4325                                   "  rx pkt (%lx)\n",
4326                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4327                              fp0_rx->rx_comp_cons),
4328                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4329                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4330                                   "brb truncate %u\n",
4331                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4332                        qstats->driver_xoff,
4333                        estats->brb_drop_lo, estats->brb_truncate_lo);
4334                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4335                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4336                         "mac_discard %u  mac_filter_discard %u  "
4337                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4338                         "ttl0_discard %u\n",
4339                        le32_to_cpu(old_tclient->checksum_discard),
4340                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4341                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4342                        estats->mac_discard, estats->mac_filter_discard,
4343                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4344                        le32_to_cpu(old_tclient->ttl0_discard));
4345
4346                 for_each_queue(bp, i) {
4347                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4348                                bnx2x_fp(bp, i, tx_pkt),
4349                                bnx2x_fp(bp, i, rx_pkt),
4350                                bnx2x_fp(bp, i, rx_calls));
4351                 }
4352         }
4353
4354         bnx2x_hw_stats_post(bp);
4355         bnx2x_storm_stats_post(bp);
4356 }
4357
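     /* On stats stop, DMA the final port (and, if present, function) stats
      * from host memory back to their shared-memory areas (SRC_PCI ->
      * DST_GRC) so management firmware sees up-to-date values.
      */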
4358 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4359 {
4360         struct dmae_command *dmae;
4361         u32 opcode;
4362         int loader_idx = PMF_DMAE_C(bp);
4363         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4364
4365         bp->executer_idx = 0;
4366
4367         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4368                   DMAE_CMD_C_ENABLE |
4369                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4370 #ifdef __BIG_ENDIAN
4371                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4372 #else
4373                   DMAE_CMD_ENDIANITY_DW_SWAP |
4374 #endif
4375                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4376                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4377
4378         if (bp->port.port_stx) {
4379
4380                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4381                 if (bp->func_stx)
4382                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4383                 else
4384                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4385                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4386                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4387                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4388                 dmae->dst_addr_hi = 0;
4389                 dmae->len = sizeof(struct host_port_stats) >> 2;
4390                 if (bp->func_stx) {
4391                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4392                         dmae->comp_addr_hi = 0;
4393                         dmae->comp_val = 1;
4394                 } else {
4395                         dmae->comp_addr_lo =
4396                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4397                         dmae->comp_addr_hi =
4398                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4399                         dmae->comp_val = DMAE_COMP_VAL;
4400
4401                         *stats_comp = 0;
4402                 }
4403         }
4404
4405         if (bp->func_stx) {
4406
4407                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4408                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4409                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4410                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4411                 dmae->dst_addr_lo = bp->func_stx >> 2;
4412                 dmae->dst_addr_hi = 0;
4413                 dmae->len = sizeof(struct host_func_stats) >> 2;
4414                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4415                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4416                 dmae->comp_val = DMAE_COMP_VAL;
4417
4418                 *stats_comp = 0;
4419         }
4420 }
4421
4422 static void bnx2x_stats_stop(struct bnx2x *bp)
4423 {
4424         int update = 0;
4425
4426         bnx2x_stats_comp(bp);
4427
4428         if (bp->port.pmf)
4429                 update = (bnx2x_hw_stats_update(bp) == 0);
4430
4431         update |= (bnx2x_storm_stats_update(bp) == 0);
4432
4433         if (update) {
4434                 bnx2x_net_stats_update(bp);
4435
4436                 if (bp->port.pmf)
4437                         bnx2x_port_stats_stop(bp);
4438
4439                 bnx2x_hw_stats_post(bp);
4440                 bnx2x_stats_comp(bp);
4441         }
4442 }
4443
4444 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4445 {
4446 }
4447
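     /* The statistics state machine: STATS_STATE_MAX states (DISABLED,
      * ENABLED) by STATS_EVENT_MAX events (PMF, LINK_UP, UPDATE, STOP).
      * bnx2x_stats_handle() below simply does:
      *
      *      bnx2x_stats_stm[state][event].action(bp);
      *      bp->stats_state = bnx2x_stats_stm[state][event].next_state;
      */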
4448 static const struct {
4449         void (*action)(struct bnx2x *bp);
4450         enum bnx2x_stats_state next_state;
4451 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4452 /* state        event   */
4453 {
4454 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4455 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4456 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4457 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4458 },
4459 {
4460 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4461 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4462 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4463 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4464 }
4465 };
4466
4467 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4468 {
4469         enum bnx2x_stats_state state = bp->stats_state;
4470
4471         bnx2x_stats_stm[state][event].action(bp);
4472         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4473
4474         /* Make sure the new stats_state is visible before anyone reads it */
4475         smp_wmb();
4476
4477         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4478                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4479                    state, event, bp->stats_state);
4480 }
4481
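     /* PMF only: DMA the current host port stats buffer out to the
      * port_stx shared-memory area so the management firmware starts
      * from a known base.
      */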
4482 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4483 {
4484         struct dmae_command *dmae;
4485         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4486
4487         /* sanity */
4488         if (!bp->port.pmf || !bp->port.port_stx) {
4489                 BNX2X_ERR("BUG!\n");
4490                 return;
4491         }
4492
4493         bp->executer_idx = 0;
4494
4495         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4496         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4497                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4498                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4499 #ifdef __BIG_ENDIAN
4500                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4501 #else
4502                         DMAE_CMD_ENDIANITY_DW_SWAP |
4503 #endif
4504                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4505                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4506         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4507         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4508         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4509         dmae->dst_addr_hi = 0;
4510         dmae->len = sizeof(struct host_port_stats) >> 2;
4511         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4512         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4513         dmae->comp_val = DMAE_COMP_VAL;
4514
4515         *stats_comp = 0;
4516         bnx2x_hw_stats_post(bp);
4517         bnx2x_stats_comp(bp);
4518 }
4519
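     /* PMF only: initialize the function stats base of every vnic sharing
      * this port (func = 2 * vn + port), temporarily retargeting
      * bp->func_stx at each vnic's mailbox value.
      */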
4520 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4521 {
4522         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4523         int port = BP_PORT(bp);
4524         int func;
4525         u32 func_stx;
4526
4527         /* sanity */
4528         if (!bp->port.pmf || !bp->func_stx) {
4529                 BNX2X_ERR("BUG!\n");
4530                 return;
4531         }
4532
4533         /* save our func_stx */
4534         func_stx = bp->func_stx;
4535
4536         for (vn = VN_0; vn < vn_max; vn++) {
4537                 func = 2*vn + port;
4538
4539                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4540                 bnx2x_func_stats_init(bp);
4541                 bnx2x_hw_stats_post(bp);
4542                 bnx2x_stats_comp(bp);
4543         }
4544
4545         /* restore our func_stx */
4546         bp->func_stx = func_stx;
4547 }
4548
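     /* Non-PMF path: DMA the function stats base back from shared memory
      * (SRC_GRC -> DST_PCI) so accumulation can continue from the values
      * already recorded there.
      */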
4549 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4550 {
4551         struct dmae_command *dmae = &bp->stats_dmae;
4552         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4553
4554         /* sanity */
4555         if (!bp->func_stx) {
4556                 BNX2X_ERR("BUG!\n");
4557                 return;
4558         }
4559
4560         bp->executer_idx = 0;
4561         memset(dmae, 0, sizeof(struct dmae_command));
4562
4563         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4564                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4565                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4566 #ifdef __BIG_ENDIAN
4567                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4568 #else
4569                         DMAE_CMD_ENDIANITY_DW_SWAP |
4570 #endif
4571                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4572                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4573         dmae->src_addr_lo = bp->func_stx >> 2;
4574         dmae->src_addr_hi = 0;
4575         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4576         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4577         dmae->len = sizeof(struct host_func_stats) >> 2;
4578         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4579         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4580         dmae->comp_val = DMAE_COMP_VAL;
4581
4582         *stats_comp = 0;
4583         bnx2x_hw_stats_post(bp);
4584         bnx2x_stats_comp(bp);
4585 }
4586
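     /* One-time statistics setup on load: fetch the port/function stats
      * shared-memory addresses from the MCP, snapshot the NIG counters as
      * the "old" baseline, zero all per-queue and device-wide stats, and
      * initialize or update the stats base depending on PMF status.
      */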
4587 static void bnx2x_stats_init(struct bnx2x *bp)
4588 {
4589         int port = BP_PORT(bp);
4590         int func = BP_FUNC(bp);
4591         int i;
4592
4593         bp->stats_pending = 0;
4594         bp->executer_idx = 0;
4595         bp->stats_counter = 0;
4596
4597         /* port and func stats for management */
4598         if (!BP_NOMCP(bp)) {
4599                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4600                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4601
4602         } else {
4603                 bp->port.port_stx = 0;
4604                 bp->func_stx = 0;
4605         }
4606         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4607            bp->port.port_stx, bp->func_stx);
4608
4609         /* port stats */
4610         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4611         bp->port.old_nig_stats.brb_discard =
4612                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4613         bp->port.old_nig_stats.brb_truncate =
4614                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4615         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4616                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4617         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4618                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4619
4620         /* function stats */
4621         for_each_queue(bp, i) {
4622                 struct bnx2x_fastpath *fp = &bp->fp[i];
4623
4624                 memset(&fp->old_tclient, 0,
4625                        sizeof(struct tstorm_per_client_stats));
4626                 memset(&fp->old_uclient, 0,
4627                        sizeof(struct ustorm_per_client_stats));
4628                 memset(&fp->old_xclient, 0,
4629                        sizeof(struct xstorm_per_client_stats));
4630                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4631         }
4632
4633         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4634         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4635
4636         bp->stats_state = STATS_STATE_DISABLED;
4637
4638         if (bp->port.pmf) {
4639                 if (bp->port.port_stx)
4640                         bnx2x_port_stats_base_init(bp);
4641
4642                 if (bp->func_stx)
4643                         bnx2x_func_stats_base_init(bp);
4644
4645         } else if (bp->func_stx)
4646                 bnx2x_func_stats_base_update(bp);
4647 }
4648
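     /* Periodic driver timer: services the optional poll mode, feeds the
      * driver-pulse heartbeat to the MCP and sanity-checks its response,
      * and kicks a STATS_EVENT_UPDATE while the device is open.
      */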
4649 static void bnx2x_timer(unsigned long data)
4650 {
4651         struct bnx2x *bp = (struct bnx2x *) data;
4652
4653         if (!netif_running(bp->dev))
4654                 return;
4655
4656         if (atomic_read(&bp->intr_sem) != 0)
4657                 goto timer_restart;
4658
4659         if (poll) {
4660                 struct bnx2x_fastpath *fp = &bp->fp[0];
4661                 int rc;
4662
4663                 bnx2x_tx_int(fp);
4664                 rc = bnx2x_rx_int(fp, 1000);
4665         }
4666
4667         if (!BP_NOMCP(bp)) {
4668                 int func = BP_FUNC(bp);
4669                 u32 drv_pulse;
4670                 u32 mcp_pulse;
4671
4672                 ++bp->fw_drv_pulse_wr_seq;
4673                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4674                 /* TBD - add SYSTEM_TIME */
4675                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4676                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4677
4678                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4679                              MCP_PULSE_SEQ_MASK);
4680                 /* The delta between driver pulse and mcp response
4681                  * should be 1 (before mcp response) or 0 (after mcp response)
4682                  */
4683                 if ((drv_pulse != mcp_pulse) &&
4684                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4685                         /* someone lost a heartbeat... */
4686                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4687                                   drv_pulse, mcp_pulse);
4688                 }
4689         }
4690
4691         if (bp->state == BNX2X_STATE_OPEN)
4692                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4693
4694 timer_restart:
4695         mod_timer(&bp->timer, jiffies + bp->current_interval);
4696 }
4697
4698 /* end of Statistics */
4699
4700 /* nic init */
4701
4702 /*
4703  * nic init service functions
4704  */
4705
4706 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4707 {
4708         int port = BP_PORT(bp);
4709
4710         /* "CSTORM" */
4711         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4712                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4713                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4714         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4715                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4716                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4717 }
4718
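     /* Point the chip at a per-queue host status block: write its DMA
      * address and owning function into CSTORM internal memory (both the
      * USTORM and CSTORM halves live there), start with all host-coalescing
      * indices disabled and ack to enable the IGU interrupt.
      */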
4719 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4720                           dma_addr_t mapping, int sb_id)
4721 {
4722         int port = BP_PORT(bp);
4723         int func = BP_FUNC(bp);
4724         int index;
4725         u64 section;
4726
4727         /* USTORM */
4728         section = ((u64)mapping) + offsetof(struct host_status_block,
4729                                             u_status_block);
4730         sb->u_status_block.status_block_id = sb_id;
4731
4732         REG_WR(bp, BAR_CSTRORM_INTMEM +
4733                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4734         REG_WR(bp, BAR_CSTRORM_INTMEM +
4735                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4736                U64_HI(section));
4737         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4738                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4739
4740         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4741                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4742                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4743
4744         /* CSTORM */
4745         section = ((u64)mapping) + offsetof(struct host_status_block,
4746                                             c_status_block);
4747         sb->c_status_block.status_block_id = sb_id;
4748
4749         REG_WR(bp, BAR_CSTRORM_INTMEM +
4750                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4751         REG_WR(bp, BAR_CSTRORM_INTMEM +
4752                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4753                U64_HI(section));
4754         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4755                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4756
4757         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4758                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4759                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4760
4761         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4762 }
4763
4764 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4765 {
4766         int func = BP_FUNC(bp);
4767
4768         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4769                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4770                         sizeof(struct tstorm_def_status_block)/4);
4771         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4772                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4773                         sizeof(struct cstorm_def_status_block_u)/4);
4774         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4775                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4776                         sizeof(struct cstorm_def_status_block_c)/4);
4777         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4778                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4779                         sizeof(struct xstorm_def_status_block)/4);
4780 }
4781
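     /* Set up the default status block: cache the AEU attention signals
      * per dynamic group, then give each storm the DMA address of its
      * section with all host-coalescing indices initially disabled.
      */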
4782 static void bnx2x_init_def_sb(struct bnx2x *bp,
4783                               struct host_def_status_block *def_sb,
4784                               dma_addr_t mapping, int sb_id)
4785 {
4786         int port = BP_PORT(bp);
4787         int func = BP_FUNC(bp);
4788         int index, val, reg_offset;
4789         u64 section;
4790
4791         /* ATTN */
4792         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4793                                             atten_status_block);
4794         def_sb->atten_status_block.status_block_id = sb_id;
4795
4796         bp->attn_state = 0;
4797
4798         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4799                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4800
4801         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4802                 bp->attn_group[index].sig[0] = REG_RD(bp,
4803                                                      reg_offset + 0x10*index);
4804                 bp->attn_group[index].sig[1] = REG_RD(bp,
4805                                                reg_offset + 0x4 + 0x10*index);
4806                 bp->attn_group[index].sig[2] = REG_RD(bp,
4807                                                reg_offset + 0x8 + 0x10*index);
4808                 bp->attn_group[index].sig[3] = REG_RD(bp,
4809                                                reg_offset + 0xc + 0x10*index);
4810         }
4811
4812         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4813                              HC_REG_ATTN_MSG0_ADDR_L);
4814
4815         REG_WR(bp, reg_offset, U64_LO(section));
4816         REG_WR(bp, reg_offset + 4, U64_HI(section));
4817
4818         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4819
4820         val = REG_RD(bp, reg_offset);
4821         val |= sb_id;
4822         REG_WR(bp, reg_offset, val);
4823
4824         /* USTORM */
4825         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4826                                             u_def_status_block);
4827         def_sb->u_def_status_block.status_block_id = sb_id;
4828
4829         REG_WR(bp, BAR_CSTRORM_INTMEM +
4830                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4831         REG_WR(bp, BAR_CSTRORM_INTMEM +
4832                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4833                U64_HI(section));
4834         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4835                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4836
4837         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4838                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4839                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4840
4841         /* CSTORM */
4842         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4843                                             c_def_status_block);
4844         def_sb->c_def_status_block.status_block_id = sb_id;
4845
4846         REG_WR(bp, BAR_CSTRORM_INTMEM +
4847                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4848         REG_WR(bp, BAR_CSTRORM_INTMEM +
4849                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4850                U64_HI(section));
4851         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4852                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4853
4854         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4855                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4856                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4857
4858         /* TSTORM */
4859         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4860                                             t_def_status_block);
4861         def_sb->t_def_status_block.status_block_id = sb_id;
4862
4863         REG_WR(bp, BAR_TSTRORM_INTMEM +
4864                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4865         REG_WR(bp, BAR_TSTRORM_INTMEM +
4866                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4867                U64_HI(section));
4868         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4869                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4870
4871         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4872                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4873                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4874
4875         /* XSTORM */
4876         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4877                                             x_def_status_block);
4878         def_sb->x_def_status_block.status_block_id = sb_id;
4879
4880         REG_WR(bp, BAR_XSTRORM_INTMEM +
4881                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4882         REG_WR(bp, BAR_XSTRORM_INTMEM +
4883                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4884                U64_HI(section));
4885         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4886                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4887
4888         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4889                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4890                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4891
4892         bp->stats_pending = 0;
4893         bp->set_mac_pending = 0;
4894
4895         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4896 }
4897
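     /* Program the Rx/Tx CQ interrupt coalescing timeouts.  The configured
      * tick values are scaled down by 4 * BNX2X_BTR; a scaled value of 0
      * disables host coalescing for that index via the matching HC_DISABLE
      * register instead.
      */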
4898 static void bnx2x_update_coalesce(struct bnx2x *bp)
4899 {
4900         int port = BP_PORT(bp);
4901         int i;
4902
4903         for_each_queue(bp, i) {
4904                 int sb_id = bp->fp[i].sb_id;
4905
4906                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4907                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4909                                                       U_SB_ETH_RX_CQ_INDEX),
4910                         bp->rx_ticks/(4 * BNX2X_BTR));
4911                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4913                                                        U_SB_ETH_RX_CQ_INDEX),
4914                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4915
4916                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4917                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4918                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4919                                                       C_SB_ETH_TX_CQ_INDEX),
4920                         bp->tx_ticks/(4 * BNX2X_BTR));
4921                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4922                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4923                                                        C_SB_ETH_TX_CQ_INDEX),
4924                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4925         }
4926 }
4927
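     /* Release the first 'last' TPA pool entries; only bins still in the
      * START state hold a DMA mapping that must be unmapped first.
      */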
4928 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4929                                        struct bnx2x_fastpath *fp, int last)
4930 {
4931         int i;
4932
4933         for (i = 0; i < last; i++) {
4934                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4935                 struct sk_buff *skb = rx_buf->skb;
4936
4937                 if (skb == NULL) {
4938                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4939                         continue;
4940                 }
4941
4942                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4943                         pci_unmap_single(bp->pdev,
4944                                          pci_unmap_addr(rx_buf, mapping),
4945                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4946
4947                 dev_kfree_skb(skb);
4948                 rx_buf->skb = NULL;
4949         }
4950 }
4951
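     /* Build all Rx rings: optionally pre-allocate the TPA skb pool, link
      * the "next page" elements of the SGE, BD and CQ rings, fill the rings
      * with buffers and publish the initial producers to the chip.
      */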
4952 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4953 {
4954         int func = BP_FUNC(bp);
4955         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4956                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4957         u16 ring_prod, cqe_ring_prod;
4958         int i, j;
4959
4960         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4961         DP(NETIF_MSG_IFUP,
4962            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4963
4964         if (bp->flags & TPA_ENABLE_FLAG) {
4965
4966                 for_each_queue(bp, j) {
4967                         struct bnx2x_fastpath *fp = &bp->fp[j];
4968
4969                         for (i = 0; i < max_agg_queues; i++) {
4970                                 fp->tpa_pool[i].skb =
4971                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4972                                 if (!fp->tpa_pool[i].skb) {
4973                                         BNX2X_ERR("Failed to allocate TPA "
4974                                                   "skb pool for queue[%d] - "
4975                                                   "disabling TPA on this "
4976                                                   "queue!\n", j);
4977                                         bnx2x_free_tpa_pool(bp, fp, i);
4978                                         fp->disable_tpa = 1;
4979                                         break;
4980                                 }
4981                                 pci_unmap_addr_set((struct sw_rx_bd *)
4982                                                         &fp->tpa_pool[i],
4983                                                    mapping, 0);
4984                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4985                         }
4986                 }
4987         }
4988
4989         for_each_queue(bp, j) {
4990                 struct bnx2x_fastpath *fp = &bp->fp[j];
4991
4992                 fp->rx_bd_cons = 0;
4993                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4994                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4995
4996                 /* "next page" elements initialization */
4997                 /* SGE ring */
4998                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4999                         struct eth_rx_sge *sge;
5000
5001                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5002                         sge->addr_hi =
5003                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5004                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5005                         sge->addr_lo =
5006                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5007                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5008                 }
5009
5010                 bnx2x_init_sge_ring_bit_mask(fp);
5011
5012                 /* RX BD ring */
5013                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5014                         struct eth_rx_bd *rx_bd;
5015
5016                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5017                         rx_bd->addr_hi =
5018                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5019                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5020                         rx_bd->addr_lo =
5021                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5022                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5023                 }
5024
5025                 /* CQ ring */
5026                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5027                         struct eth_rx_cqe_next_page *nextpg;
5028
5029                         nextpg = (struct eth_rx_cqe_next_page *)
5030                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5031                         nextpg->addr_hi =
5032                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5033                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5034                         nextpg->addr_lo =
5035                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5036                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5037                 }
5038
5039                 /* Allocate SGEs and initialize the ring elements */
5040                 for (i = 0, ring_prod = 0;
5041                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5042
5043                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5044                                 BNX2X_ERR("was only able to allocate "
5045                                           "%d rx sges\n", i);
5046                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5047                                 /* Cleanup already allocated elements */
5048                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5049                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5050                                 fp->disable_tpa = 1;
5051                                 ring_prod = 0;
5052                                 break;
5053                         }
5054                         ring_prod = NEXT_SGE_IDX(ring_prod);
5055                 }
5056                 fp->rx_sge_prod = ring_prod;
5057
5058                 /* Allocate BDs and initialize BD ring */
5059                 fp->rx_comp_cons = 0;
5060                 cqe_ring_prod = ring_prod = 0;
5061                 for (i = 0; i < bp->rx_ring_size; i++) {
5062                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5063                                 BNX2X_ERR("was only able to allocate "
5064                                           "%d rx skbs on queue[%d]\n", i, j);
5065                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5066                                 break;
5067                         }
5068                         ring_prod = NEXT_RX_IDX(ring_prod);
5069                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5070                         WARN_ON(ring_prod <= i);
5071                 }
5072
5073                 fp->rx_bd_prod = ring_prod;
5074                 /* must not have more available CQEs than BDs */
5075                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5076                                        cqe_ring_prod);
5077                 fp->rx_pkt = fp->rx_calls = 0;
5078
5079                 /* Warning!
5080                  * This will generate an interrupt (to the TSTORM),
5081                  * so it must only be done after the chip is initialized.
5082                  */
5083                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5084                                      fp->rx_sge_prod);
5085                 if (j != 0)
5086                         continue;
5087
5088                 REG_WR(bp, BAR_USTRORM_INTMEM +
5089                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5090                        U64_LO(fp->rx_comp_mapping));
5091                 REG_WR(bp, BAR_USTRORM_INTMEM +
5092                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5093                        U64_HI(fp->rx_comp_mapping));
5094         }
5095 }
5096
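     /* Link the Tx BD ring pages (the last BD of each page is reused as a
      * "next page" pointer) and reset the doorbell and all producer/consumer
      * indices.
      */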
5097 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5098 {
5099         int i, j;
5100
5101         for_each_queue(bp, j) {
5102                 struct bnx2x_fastpath *fp = &bp->fp[j];
5103
5104                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5105                         struct eth_tx_next_bd *tx_next_bd =
5106                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5107
5108                         tx_next_bd->addr_hi =
5109                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5110                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5111                         tx_next_bd->addr_lo =
5112                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5113                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5114                 }
5115
5116                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5117                 fp->tx_db.data.zero_fill1 = 0;
5118                 fp->tx_db.data.prod = 0;
5119
5120                 fp->tx_pkt_prod = 0;
5121                 fp->tx_pkt_cons = 0;
5122                 fp->tx_bd_prod = 0;
5123                 fp->tx_bd_cons = 0;
5124                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5125                 fp->tx_pkt = 0;
5126         }
5127 }
5128
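     /* Set up the slowpath (SP) queue: reset the producers, point XSTORM
      * at the SPQ page and publish the initial producer index.
      */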
5129 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5130 {
5131         int func = BP_FUNC(bp);
5132
5133         spin_lock_init(&bp->spq_lock);
5134
5135         bp->spq_left = MAX_SPQ_PENDING;
5136         bp->spq_prod_idx = 0;
5137         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5138         bp->spq_prod_bd = bp->spq;
5139         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5140
5141         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5142                U64_LO(bp->spq_mapping));
5143         REG_WR(bp,
5144                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5145                U64_HI(bp->spq_mapping));
5146
5147         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5148                bp->spq_prod_idx);
5149 }
5150
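     /* Fill the per-connection Ethernet context: USTORM gets the Rx BD/SGE
      * ring addresses, buffer sizes and TPA settings; CSTORM/XSTORM get the
      * Tx completion index and BD ring address.  The cdu_* fields carry CDU
      * bookkeeping for the connection.
      */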
5151 static void bnx2x_init_context(struct bnx2x *bp)
5152 {
5153         int i;
5154
5155         /* Rx */
5156         for_each_queue(bp, i) {
5157                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5158                 struct bnx2x_fastpath *fp = &bp->fp[i];
5159                 u8 cl_id = fp->cl_id;
5160
5161                 context->ustorm_st_context.common.sb_index_numbers =
5162                                                 BNX2X_RX_SB_INDEX_NUM;
5163                 context->ustorm_st_context.common.clientId = cl_id;
5164                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5165                 context->ustorm_st_context.common.flags =
5166                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5167                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5168                 context->ustorm_st_context.common.statistics_counter_id =
5169                                                 cl_id;
5170                 context->ustorm_st_context.common.mc_alignment_log_size =
5171                                                 BNX2X_RX_ALIGN_SHIFT;
5172                 context->ustorm_st_context.common.bd_buff_size =
5173                                                 bp->rx_buf_size;
5174                 context->ustorm_st_context.common.bd_page_base_hi =
5175                                                 U64_HI(fp->rx_desc_mapping);
5176                 context->ustorm_st_context.common.bd_page_base_lo =
5177                                                 U64_LO(fp->rx_desc_mapping);
5178                 if (!fp->disable_tpa) {
5179                         context->ustorm_st_context.common.flags |=
5180                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5181                         context->ustorm_st_context.common.sge_buff_size =
5182                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5183                                          (u32)0xffff);
5184                         context->ustorm_st_context.common.sge_page_base_hi =
5185                                                 U64_HI(fp->rx_sge_mapping);
5186                         context->ustorm_st_context.common.sge_page_base_lo =
5187                                                 U64_LO(fp->rx_sge_mapping);
5188
5189                         context->ustorm_st_context.common.max_sges_for_packet =
5190                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5191                         context->ustorm_st_context.common.max_sges_for_packet =
5192                                 ((context->ustorm_st_context.common.
5193                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5194                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5195                 }
5196
5197                 context->ustorm_ag_context.cdu_usage =
5198                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5199                                                CDU_REGION_NUMBER_UCM_AG,
5200                                                ETH_CONNECTION_TYPE);
5201
5202                 context->xstorm_ag_context.cdu_reserved =
5203                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5204                                                CDU_REGION_NUMBER_XCM_AG,
5205                                                ETH_CONNECTION_TYPE);
5206         }
5207
5208         /* Tx */
5209         for_each_queue(bp, i) {
5210                 struct bnx2x_fastpath *fp = &bp->fp[i];
5211                 struct eth_context *context =
5212                         bnx2x_sp(bp, context[i].eth);
5213
5214                 context->cstorm_st_context.sb_index_number =
5215                                                 C_SB_ETH_TX_CQ_INDEX;
5216                 context->cstorm_st_context.status_block_id = fp->sb_id;
5217
5218                 context->xstorm_st_context.tx_bd_page_base_hi =
5219                                                 U64_HI(fp->tx_desc_mapping);
5220                 context->xstorm_st_context.tx_bd_page_base_lo =
5221                                                 U64_LO(fp->tx_desc_mapping);
5222                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5223                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5224         }
5225 }
5226
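     /* Program the RSS indirection table: entry i maps to client
      * (leading cl_id + i % num_queues), so the entries simply cycle
      * through the active queues.  Nothing to do when RSS is disabled.
      */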
5227 static void bnx2x_init_ind_table(struct bnx2x *bp)
5228 {
5229         int func = BP_FUNC(bp);
5230         int i;
5231
5232         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5233                 return;
5234
5235         DP(NETIF_MSG_IFUP,
5236            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5237         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5238                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5239                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5240                         bp->fp->cl_id + (i % bp->num_queues));
5241 }
5242
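     /* Broadcast the per-client Tstorm configuration (MTU, statistics and
      * VLAN-stripping flags) to every queue's client slot, written 32 bits
      * at a time into TSTORM internal memory.
      */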
5243 static void bnx2x_set_client_config(struct bnx2x *bp)
5244 {
5245         struct tstorm_eth_client_config tstorm_client = {0};
5246         int port = BP_PORT(bp);
5247         int i;
5248
5249         tstorm_client.mtu = bp->dev->mtu;
5250         tstorm_client.config_flags =
5251                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5252                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5253 #ifdef BCM_VLAN
5254         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5255                 tstorm_client.config_flags |=
5256                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5257                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5258         }
5259 #endif
5260
5261         for_each_queue(bp, i) {
5262                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5263
5264                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5265                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5266                        ((u32 *)&tstorm_client)[0]);
5267                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5268                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5269                        ((u32 *)&tstorm_client)[1]);
5270         }
5271
5272         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5273            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5274 }
5275
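     /* Translate the driver Rx mode into Tstorm drop-all/accept-all masks
      * and a NIG LLH filter mask.  Management unicast traffic is passed to
      * the host only in promiscuous mode.
      */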
5276 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5277 {
5278         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5279         int mode = bp->rx_mode;
5280         int mask = bp->rx_mode_cl_mask;
5281         int func = BP_FUNC(bp);
5282         int port = BP_PORT(bp);
5283         int i;
5284         /* All but management unicast packets should pass to the host as well */
5285         u32 llh_mask =
5286                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5287                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5288                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5289                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5290
5291         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5292
5293         switch (mode) {
5294         case BNX2X_RX_MODE_NONE: /* no Rx */
5295                 tstorm_mac_filter.ucast_drop_all = mask;
5296                 tstorm_mac_filter.mcast_drop_all = mask;
5297                 tstorm_mac_filter.bcast_drop_all = mask;
5298                 break;
5299
5300         case BNX2X_RX_MODE_NORMAL:
5301                 tstorm_mac_filter.bcast_accept_all = mask;
5302                 break;
5303
5304         case BNX2X_RX_MODE_ALLMULTI:
5305                 tstorm_mac_filter.mcast_accept_all = mask;
5306                 tstorm_mac_filter.bcast_accept_all = mask;
5307                 break;
5308
5309         case BNX2X_RX_MODE_PROMISC:
5310                 tstorm_mac_filter.ucast_accept_all = mask;
5311                 tstorm_mac_filter.mcast_accept_all = mask;
5312                 tstorm_mac_filter.bcast_accept_all = mask;
5313                 /* pass management unicast packets as well */
5314                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5315                 break;
5316
5317         default:
5318                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5319                 break;
5320         }
5321
5322         REG_WR(bp,
5323                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5324                llh_mask);
5325
5326         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5327                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5328                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5329                        ((u32 *)&tstorm_mac_filter)[i]);
5330
5331 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5332                    ((u32 *)&tstorm_mac_filter)[i]); */
5333         }
5334
5335         if (mode != BNX2X_RX_MODE_NONE)
5336                 bnx2x_set_client_config(bp);
5337 }
5338
5339 static void bnx2x_init_internal_common(struct bnx2x *bp)
5340 {
5341         int i;
5342
5343         /* Zero this manually as its initialization is
5344            currently missing in the initTool */
5345         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5346                 REG_WR(bp, BAR_USTRORM_INTMEM +
5347                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5348 }
5349
5350 static void bnx2x_init_internal_port(struct bnx2x *bp)
5351 {
5352         int port = BP_PORT(bp);
5353
5354         REG_WR(bp,
5355                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5356         REG_WR(bp,
5357                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5358         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5359         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5360 }
5361
5362 static void bnx2x_init_internal_func(struct bnx2x *bp)
5363 {
5364         struct tstorm_eth_function_common_config tstorm_config = {0};
5365         struct stats_indication_flags stats_flags = {0};
5366         int port = BP_PORT(bp);
5367         int func = BP_FUNC(bp);
5368         int i, j;
5369         u32 offset;
5370         u16 max_agg_size;
5371
5372         if (is_multi(bp)) {
5373                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5374                 tstorm_config.rss_result_mask = MULTI_MASK;
5375         }
5376
5377         /* Enable TPA if needed */
5378         if (bp->flags & TPA_ENABLE_FLAG)
5379                 tstorm_config.config_flags |=
5380                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5381
5382         if (IS_E1HMF(bp))
5383                 tstorm_config.config_flags |=
5384                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5385
5386         tstorm_config.leading_client_id = BP_L_ID(bp);
5387
5388         REG_WR(bp, BAR_TSTRORM_INTMEM +
5389                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5390                (*(u32 *)&tstorm_config));
5391
5392         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5393         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5394         bnx2x_set_storm_rx_mode(bp);
5395
5396         for_each_queue(bp, i) {
5397                 u8 cl_id = bp->fp[i].cl_id;
5398
5399                 /* reset xstorm per client statistics */
5400                 offset = BAR_XSTRORM_INTMEM +
5401                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5402                 for (j = 0;
5403                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5404                         REG_WR(bp, offset + j*4, 0);
5405
5406                 /* reset tstorm per client statistics */
5407                 offset = BAR_TSTRORM_INTMEM +
5408                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5409                 for (j = 0;
5410                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5411                         REG_WR(bp, offset + j*4, 0);
5412
5413                 /* reset ustorm per client statistics */
5414                 offset = BAR_USTRORM_INTMEM +
5415                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5416                 for (j = 0;
5417                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5418                         REG_WR(bp, offset + j*4, 0);
5419         }
5420
5421         /* Init statistics related context */
5422         stats_flags.collect_eth = 1;
5423
5424         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5425                ((u32 *)&stats_flags)[0]);
5426         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5427                ((u32 *)&stats_flags)[1]);
5428
5429         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5430                ((u32 *)&stats_flags)[0]);
5431         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432                ((u32 *)&stats_flags)[1]);
5433
5434         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5435                ((u32 *)&stats_flags)[0]);
5436         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5437                ((u32 *)&stats_flags)[1]);
5438
5439         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5440                ((u32 *)&stats_flags)[0]);
5441         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5442                ((u32 *)&stats_flags)[1]);
5443
5444         REG_WR(bp, BAR_XSTRORM_INTMEM +
5445                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5446                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5447         REG_WR(bp, BAR_XSTRORM_INTMEM +
5448                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5449                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5450
5451         REG_WR(bp, BAR_TSTRORM_INTMEM +
5452                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5453                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5454         REG_WR(bp, BAR_TSTRORM_INTMEM +
5455                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5456                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5457
5458         REG_WR(bp, BAR_USTRORM_INTMEM +
5459                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5460                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5461         REG_WR(bp, BAR_USTRORM_INTMEM +
5462                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5463                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5464
5465         if (CHIP_IS_E1H(bp)) {
5466                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5467                         IS_E1HMF(bp));
5468                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5469                         IS_E1HMF(bp));
5470                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5471                         IS_E1HMF(bp));
5472                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5473                         IS_E1HMF(bp));
5474
5475                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5476                          bp->e1hov);
5477         }
5478
5479         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
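        /* i.e. up to min(8, MAX_SKB_FRAGS) SGEs of
         * SGE_PAGE_SIZE * PAGES_PER_SGE bytes each, clamped to 0xffff since
         * max_agg_size is written as a 16-bit value below
         */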
5480         max_agg_size =
5481                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5482                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5483                     (u32)0xffff);
5484         for_each_queue(bp, i) {
5485                 struct bnx2x_fastpath *fp = &bp->fp[i];
5486
5487                 REG_WR(bp, BAR_USTRORM_INTMEM +
5488                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5489                        U64_LO(fp->rx_comp_mapping));
5490                 REG_WR(bp, BAR_USTRORM_INTMEM +
5491                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5492                        U64_HI(fp->rx_comp_mapping));
5493
5494                 /* Next page */
5495                 REG_WR(bp, BAR_USTRORM_INTMEM +
5496                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5497                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5498                 REG_WR(bp, BAR_USTRORM_INTMEM +
5499                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5500                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5501
5502                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5503                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5504                          max_agg_size);
5505         }
5506
5507         /* dropless flow control */
5508         if (CHIP_IS_E1H(bp)) {
5509                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5510
5511                 rx_pause.bd_thr_low = 250;
5512                 rx_pause.cqe_thr_low = 250;
5513                 rx_pause.cos = 1;
5514                 rx_pause.sge_thr_low = 0;
5515                 rx_pause.bd_thr_high = 350;
5516                 rx_pause.cqe_thr_high = 350;
5517                 rx_pause.sge_thr_high = 0;
5518
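                /* low/high watermarks for the dropless-flow-control logic,
                 * apparently in ring entries: pause below *_thr_low, release
                 * above *_thr_high; the SGE thresholds stay 0 (unused)
                 * unless TPA is active on the queue (set below)
                 */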
5519                 for_each_queue(bp, i) {
5520                         struct bnx2x_fastpath *fp = &bp->fp[i];
5521
5522                         if (!fp->disable_tpa) {
5523                                 rx_pause.sge_thr_low = 150;
5524                                 rx_pause.sge_thr_high = 250;
5525                         }
5526
5528                         offset = BAR_USTRORM_INTMEM +
5529                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5530                                                                    fp->cl_id);
5531                         for (j = 0;
5532                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5533                              j++)
5534                                 REG_WR(bp, offset + j*4,
5535                                        ((u32 *)&rx_pause)[j]);
5536                 }
5537         }
5538
5539         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5540
5541         /* Init rate shaping and fairness contexts */
5542         if (IS_E1HMF(bp)) {
5543                 int vn;
5544
5545                 /* During init there is no active link;
5546                    until link is up, set the link rate to 10Gbps */
5547                 bp->link_vars.line_speed = SPEED_10000;
5548                 bnx2x_init_port_minmax(bp);
5549
5550                 if (!BP_NOMCP(bp))
5551                         bp->mf_config =
5552                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5553                 bnx2x_calc_vn_weight_sum(bp);
5554
5555                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5556                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5557
5558                 /* Enable rate shaping and fairness */
5559                 bp->cmng.flags.cmng_enables |=
5560                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5561
5562         } else {
5563                 /* rate shaping and fairness are disabled */
5564                 DP(NETIF_MSG_IFUP,
5565                    "single function mode  minmax will be disabled\n");
5566         }
5567
5568
5569         /* Store it to internal memory */
5570         if (bp->port.pmf)
5571                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5572                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5573                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5574                                ((u32 *)(&bp->cmng))[i]);
5575 }
5576
5577 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5578 {
5579         switch (load_code) {
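        /* the cases deliberately fall through: a COMMON load also performs
         * the PORT and FUNCTION init, and a PORT load also performs the
         * FUNCTION init (hence the "no break" markers)
         */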
5580         case FW_MSG_CODE_DRV_LOAD_COMMON:
5581                 bnx2x_init_internal_common(bp);
5582                 /* no break */
5583
5584         case FW_MSG_CODE_DRV_LOAD_PORT:
5585                 bnx2x_init_internal_port(bp);
5586                 /* no break */
5587
5588         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5589                 bnx2x_init_internal_func(bp);
5590                 break;
5591
5592         default:
5593                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5594                 break;
5595         }
5596 }
5597
5598 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5599 {
5600         int i;
5601
5602         for_each_queue(bp, i) {
5603                 struct bnx2x_fastpath *fp = &bp->fp[i];
5604
5605                 fp->bp = bp;
5606                 fp->state = BNX2X_FP_STATE_CLOSED;
5607                 fp->index = i;
5608                 fp->cl_id = BP_L_ID(bp) + i;
5609 #ifdef BCM_CNIC
5610                 fp->sb_id = fp->cl_id + 1;
5611 #else
5612                 fp->sb_id = fp->cl_id;
5613 #endif
5614                 DP(NETIF_MSG_IFUP,
5615                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5616                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5617                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5618                               fp->sb_id);
5619                 bnx2x_update_fpsb_idx(fp);
5620         }
5621
5622         /* ensure status block indices were read */
5623         rmb();
5624
5625
5626         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5627                           DEF_SB_ID);
5628         bnx2x_update_dsb_idx(bp);
5629         bnx2x_update_coalesce(bp);
5630         bnx2x_init_rx_rings(bp);
5631         bnx2x_init_tx_ring(bp);
5632         bnx2x_init_sp_ring(bp);
5633         bnx2x_init_context(bp);
5634         bnx2x_init_internal(bp, load_code);
5635         bnx2x_init_ind_table(bp);
5636         bnx2x_stats_init(bp);
5637
5638         /* At this point, we are ready for interrupts */
5639         atomic_set(&bp->intr_sem, 0);
5640
5641         /* flush all before enabling interrupts */
5642         mb();
5643         mmiowb();
5644
5645         bnx2x_int_enable(bp);
5646
5647         /* Check for SPIO5 */
5648         bnx2x_attn_int_deasserted0(bp,
5649                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5650                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5651 }
5652
5653 /* end of nic init */
5654
5655 /*
5656  * gzip service functions
5657  */
5658
5659 static int bnx2x_gunzip_init(struct bnx2x *bp)
5660 {
5661         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5662                                               &bp->gunzip_mapping);
5663         if (bp->gunzip_buf == NULL)
5664                 goto gunzip_nomem1;
5665
5666         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5667         if (bp->strm == NULL)
5668                 goto gunzip_nomem2;
5669
5670         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5671                                       GFP_KERNEL);
5672         if (bp->strm->workspace == NULL)
5673                 goto gunzip_nomem3;
5674
5675         return 0;
5676
5677 gunzip_nomem3:
5678         kfree(bp->strm);
5679         bp->strm = NULL;
5680
5681 gunzip_nomem2:
5682         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5683                             bp->gunzip_mapping);
5684         bp->gunzip_buf = NULL;
5685
5686 gunzip_nomem1:
5687         netdev_err(bp->dev, "Cannot allocate firmware buffer for decompression\n");
5688         return -ENOMEM;
5689 }
5690
5691 static void bnx2x_gunzip_end(struct bnx2x *bp)
5692 {
5693         kfree(bp->strm->workspace);
5694
5695         kfree(bp->strm);
5696         bp->strm = NULL;
5697
5698         if (bp->gunzip_buf) {
5699                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5700                                     bp->gunzip_mapping);
5701                 bp->gunzip_buf = NULL;
5702         }
5703 }
5704
5705 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5706 {
5707         int n, rc;
5708
5709         /* check gzip header */
5710         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5711                 BNX2X_ERR("Bad gzip header\n");
5712                 return -EINVAL;
5713         }
5714
5715         n = 10;
5716
5717 #define FNAME                           0x8
5718
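        /* the fixed gzip header is 10 bytes; if FNAME (bit 3 of the flags
         * byte) is set, a NUL-terminated original-file-name field follows
         * and must be skipped as well
         */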
5719         if (zbuf[3] & FNAME)
5720                 while ((zbuf[n++] != 0) && (n < len));
5721
5722         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5723         bp->strm->avail_in = len - n;
5724         bp->strm->next_out = bp->gunzip_buf;
5725         bp->strm->avail_out = FW_BUF_SIZE;
5726
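        /* negative windowBits: the gzip wrapper was consumed above, so zlib
         * must inflate a raw deflate stream without zlib header/checksum
         */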
5727         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5728         if (rc != Z_OK)
5729                 return rc;
5730
5731         rc = zlib_inflate(bp->strm, Z_FINISH);
5732         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5733                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5734                            bp->strm->msg);
5735
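        /* presumably the init code consumes the image as 32-bit words,
         * hence the 4-byte alignment check and byte -> dword conversion
         */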
5736         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5737         if (bp->gunzip_outlen & 0x3)
5738                 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5739                            bp->gunzip_outlen);
5740         bp->gunzip_outlen >>= 2;
5741
5742         zlib_inflateEnd(bp->strm);
5743
5744         if (rc == Z_STREAM_END)
5745                 return 0;
5746
5747         return rc;
5748 }
5749
5750 /* nic load/unload */
5751
5752 /*
5753  * General service functions
5754  */
5755
5756 /* send a NIG loopback debug packet */
5757 static void bnx2x_lb_pckt(struct bnx2x *bp)
5758 {
5759         u32 wb_write[3];
5760
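        /* each 3-dword DMAE write carries 8 bytes of packet data plus a
         * control word (0x20 = SOP, 0x10 = EOP), so the two writes below
         * emit one 16-byte debug frame
         */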
5761         /* Ethernet source and destination addresses */
5762         wb_write[0] = 0x55555555;
5763         wb_write[1] = 0x55555555;
5764         wb_write[2] = 0x20;             /* SOP */
5765         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5766
5767         /* NON-IP protocol */
5768         wb_write[0] = 0x09000000;
5769         wb_write[1] = 0x55555555;
5770         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5771         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5772 }
5773
5774 /* some of the internal memories
5775  * are not directly readable from the driver;
5776  * to test them we send debug packets
5777  */
5778 static int bnx2x_int_mem_test(struct bnx2x *bp)
5779 {
5780         int factor;
5781         int count, i;
5782         u32 val = 0;
5783
5784         if (CHIP_REV_IS_FPGA(bp))
5785                 factor = 120;
5786         else if (CHIP_REV_IS_EMUL(bp))
5787                 factor = 200;
5788         else
5789                 factor = 1;
5790
5791         DP(NETIF_MSG_HW, "start part1\n");
5792
5793         /* Disable inputs of parser neighbor blocks */
5794         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5795         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5796         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5797         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5798
5799         /*  Write 0 to parser credits for CFC search request */
5800         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5801
5802         /* send Ethernet packet */
5803         bnx2x_lb_pckt(bp);
5804
5805         /* TODO: should the NIG statistics be reset here? */
5806         /* Wait until NIG register shows 1 packet of size 0x10 */
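        /* the loopback frame built by bnx2x_lb_pckt() is 2 x 8 = 16 bytes,
         * so one packet shows up as 0x10 in the BRB octet counter
         */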
5807         count = 1000 * factor;
5808         while (count) {
5809
5810                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5811                 val = *bnx2x_sp(bp, wb_data[0]);
5812                 if (val == 0x10)
5813                         break;
5814
5815                 msleep(10);
5816                 count--;
5817         }
5818         if (val != 0x10) {
5819                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5820                 return -1;
5821         }
5822
5823         /* Wait until PRS register shows 1 packet */
5824         count = 1000 * factor;
5825         while (count) {
5826                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827                 if (val == 1)
5828                         break;
5829
5830                 msleep(10);
5831                 count--;
5832         }
5833         if (val != 0x1) {
5834                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5835                 return -2;
5836         }
5837
5838         /* Reset and init BRB, PRS */
5839         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5840         msleep(50);
5841         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5842         msleep(50);
5843         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5844         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5845
5846         DP(NETIF_MSG_HW, "part2\n");
5847
5848         /* Disable inputs of parser neighbor blocks */
5849         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5850         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5851         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5852         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5853
5854         /* Write 0 to parser credits for CFC search request */
5855         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5856
5857         /* send 10 Ethernet packets */
5858         for (i = 0; i < 10; i++)
5859                 bnx2x_lb_pckt(bp);
5860
5861         /* Wait until the NIG register shows 10 + 1 packets,
5862            i.e. a total of 11*0x10 = 0xb0 octets */
5863         count = 1000 * factor;
5864         while (count) {
5865
5866                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5867                 val = *bnx2x_sp(bp, wb_data[0]);
5868                 if (val == 0xb0)
5869                         break;
5870
5871                 msleep(10);
5872                 count--;
5873         }
5874         if (val != 0xb0) {
5875                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5876                 return -3;
5877         }
5878
5879         /* Wait until PRS register shows 2 packets */
5880         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5881         if (val != 2)
5882                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5883
5884         /* Write 1 to parser credits for CFC search request */
5885         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5886
5887         /* Wait until PRS register shows 3 packets */
5888         msleep(10 * factor);
5889         /* then check that the PRS packet counter reached 3 */
5890         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5891         if (val != 3)
5892                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5893
5894         /* clear NIG EOP FIFO */
5895         for (i = 0; i < 11; i++)
5896                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5897         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5898         if (val != 1) {
5899                 BNX2X_ERR("clear of NIG failed\n");
5900                 return -4;
5901         }
5902
5903         /* Reset and init BRB, PRS, NIG */
5904         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5905         msleep(50);
5906         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5907         msleep(50);
5908         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5909         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5910 #ifndef BCM_CNIC
5911         /* set NIC mode */
5912         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5913 #endif
5914
5915         /* Enable inputs of parser neighbor blocks */
5916         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5917         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5918         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5919         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5920
5921         DP(NETIF_MSG_HW, "done\n");
5922
5923         return 0; /* OK */
5924 }
5925
5926 static void enable_blocks_attention(struct bnx2x *bp)
5927 {
5928         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5929         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5930         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5931         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5932         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5933         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5934         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5935         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5936         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5937 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5938 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5939         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5940         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5941         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5942 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5943 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5944         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5945         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5946         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5947         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5948 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5949 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5950         if (CHIP_REV_IS_FPGA(bp))
5951                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5952         else
5953                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5954         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5955         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5956         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5957 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5958 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5959         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5960         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5961 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5962         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5963 }
5964
5965
5966 static void bnx2x_reset_common(struct bnx2x *bp)
5967 {
5968         /* reset_common */
5969         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5970                0xd3ffff7f);
5971         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5972 }
5973
5974 static void bnx2x_init_pxp(struct bnx2x *bp)
5975 {
5976         u16 devctl;
5977         int r_order, w_order;
5978
5979         pci_read_config_word(bp->pdev,
5980                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5981         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5982         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5983         if (bp->mrrs == -1)
5984                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5985         else {
5986                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5987                 r_order = bp->mrrs;
5988         }
5989
5990         bnx2x_init_pxp_arb(bp, r_order, w_order);
5991 }
5992
5993 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5994 {
5995         u32 val;
5996         u8 port;
5997         u8 is_required = 0;
5998
5999         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6000               SHARED_HW_CFG_FAN_FAILURE_MASK;
6001
6002         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6003                 is_required = 1;
6004
6005         /*
6006          * The fan failure mechanism is usually related to the PHY type since
6007          * the power consumption of the board is affected by the PHY. Currently,
6008          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6009          */
6010         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6011                 for (port = PORT_0; port < PORT_MAX; port++) {
6012                         u32 phy_type =
6013                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6014                                          external_phy_config) &
6015                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6016                         is_required |=
6017                                 ((phy_type ==
6018                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6019                                  (phy_type ==
6020                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6021                                  (phy_type ==
6022                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6023                 }
6024
6025         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6026
6027         if (is_required == 0)
6028                 return;
6029
6030         /* Fan failure is indicated by SPIO 5 */
6031         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6032                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6033
6034         /* set to active low mode */
6035         val = REG_RD(bp, MISC_REG_SPIO_INT);
6036         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6037                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6038         REG_WR(bp, MISC_REG_SPIO_INT, val);
6039
6040         /* enable interrupt to signal the IGU */
6041         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6042         val |= (1 << MISC_REGISTERS_SPIO_5);
6043         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6044 }
6045
6046 static int bnx2x_init_common(struct bnx2x *bp)
6047 {
6048         u32 val, i;
6049 #ifdef BCM_CNIC
6050         u32 wb_write[2];
6051 #endif
6052
6053         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6054
6055         bnx2x_reset_common(bp);
6056         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6057         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6058
6059         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6060         if (CHIP_IS_E1H(bp))
6061                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6062
6063         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6064         msleep(30);
6065         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6066
6067         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6068         if (CHIP_IS_E1(bp)) {
6069                 /* enable HW interrupt from PXP on USDM overflow
6070                    bit 16 on INT_MASK_0 */
6071                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6072         }
6073
6074         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6075         bnx2x_init_pxp(bp);
6076
6077 #ifdef __BIG_ENDIAN
6078         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6079         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6080         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6081         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6082         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6083         /* make sure this value is 0 */
6084         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6085
6086 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6087         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6088         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6089         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6090         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6091 #endif
6092
6093         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6094 #ifdef BCM_CNIC
6095         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6096         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6097         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6098 #endif
6099
6100         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6101                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6102
6103         /* let the HW do its magic ... */
6104         msleep(100);
6105         /* finish PXP init */
6106         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6107         if (val != 1) {
6108                 BNX2X_ERR("PXP2 CFG failed\n");
6109                 return -EBUSY;
6110         }
6111         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6112         if (val != 1) {
6113                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6114                 return -EBUSY;
6115         }
6116
6117         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6118         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6119
6120         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6121
6122         /* clean the DMAE memory */
6123         bp->dmae_ready = 1;
6124         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6125
6126         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6127         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6128         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6129         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6130
6131         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6132         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6133         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6134         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6135
6136         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6137
6138 #ifdef BCM_CNIC
6139         wb_write[0] = 0;
6140         wb_write[1] = 0;
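        /* program the 64 QM base addresses in 4KB steps (wrapping every 16
         * entries) and zero each 8-byte PTRTBL entry via an indirect wide
         * write
         */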
6141         for (i = 0; i < 64; i++) {
6142                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6143                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6144
6145                 if (CHIP_IS_E1H(bp)) {
6146                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6147                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6148                                           wb_write, 2);
6149                 }
6150         }
6151 #endif
6152         /* soft reset pulse */
6153         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6154         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6155
6156 #ifdef BCM_CNIC
6157         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6158 #endif
6159
6160         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6161         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6162         if (!CHIP_REV_IS_SLOW(bp)) {
6163                 /* enable hw interrupt from doorbell Q */
6164                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6165         }
6166
6167         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6168         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6169         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6170 #ifndef BCM_CNIC
6171         /* set NIC mode */
6172         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6173 #endif
6174         if (CHIP_IS_E1H(bp))
6175                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6176
6177         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6178         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6179         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6180         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6181
6182         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6183         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6184         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6185         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6186
6187         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6188         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6189         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6190         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6191
6192         /* sync semi rtc */
6193         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6194                0x80000000);
6195         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6196                0x80000000);
6197
6198         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6199         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6200         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6201
6202         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6203         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6204                 REG_WR(bp, i, 0xc0cac01a);
6205                 /* TODO: replace with something meaningful */
6206         }
6207         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6208 #ifdef BCM_CNIC
6209         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6210         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6211         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6212         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6213         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6214         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6215         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6216         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6217         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6218         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6219 #endif
6220         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6221
6222         if (sizeof(union cdu_context) != 1024)
6223                 /* we currently assume that a context is 1024 bytes */
6224                 pr_alert("please adjust the size of cdu_context(%ld)\n",
6225                          (long)sizeof(union cdu_context));
6226
6227         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6228         val = (4 << 24) + (0 << 12) + 1024;
6229         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6230
6231         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6232         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6233         /* enable context validation interrupt from CFC */
6234         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6235
6236         /* set the thresholds to prevent CFC/CDU race */
6237         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6238
6239         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6240         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6241
6242         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6243         /* Reset PCIE errors for debug */
6244         REG_WR(bp, 0x2814, 0xffffffff);
6245         REG_WR(bp, 0x3820, 0xffffffff);
6246
6247         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6248         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6249         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6250         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6251
6252         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6253         if (CHIP_IS_E1H(bp)) {
6254                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6255                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6256         }
6257
6258         if (CHIP_REV_IS_SLOW(bp))
6259                 msleep(200);
6260
6261         /* finish CFC init */
6262         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6263         if (val != 1) {
6264                 BNX2X_ERR("CFC LL_INIT failed\n");
6265                 return -EBUSY;
6266         }
6267         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6268         if (val != 1) {
6269                 BNX2X_ERR("CFC AC_INIT failed\n");
6270                 return -EBUSY;
6271         }
6272         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6273         if (val != 1) {
6274                 BNX2X_ERR("CFC CAM_INIT failed\n");
6275                 return -EBUSY;
6276         }
6277         REG_WR(bp, CFC_REG_DEBUG0, 0);
6278
6279         /* read NIG statistic
6280            to see if this is our first load since power-up */
6281         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6282         val = *bnx2x_sp(bp, wb_data[0]);
6283
6284         /* do internal memory self test */
6285         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6286                 BNX2X_ERR("internal mem self test failed\n");
6287                 return -EBUSY;
6288         }
6289
6290         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6291         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6292         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6293         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6294         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6295                 bp->port.need_hw_lock = 1;
6296                 break;
6297
6298         default:
6299                 break;
6300         }
6301
6302         bnx2x_setup_fan_failure_detection(bp);
6303
6304         /* clear PXP2 attentions */
6305         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6306
6307         enable_blocks_attention(bp);
6308
6309         if (!BP_NOMCP(bp)) {
6310                 bnx2x_acquire_phy_lock(bp);
6311                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6312                 bnx2x_release_phy_lock(bp);
6313         } else
6314                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6315
6316         return 0;
6317 }
6318
6319 static int bnx2x_init_port(struct bnx2x *bp)
6320 {
6321         int port = BP_PORT(bp);
6322         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6323         u32 low, high;
6324         u32 val;
6325
6326         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6327
6328         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6329
6330         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6331         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6332
6333         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6334         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6335         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6336         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6337
6338 #ifdef BCM_CNIC
6339         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6340
6341         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6342         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6343         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6344 #endif
6345         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6346
6347         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6348         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6349                 /* no pause for emulation and FPGA */
6350                 low = 0;
6351                 high = 513;
6352         } else {
6353                 if (IS_E1HMF(bp))
6354                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6355                 else if (bp->dev->mtu > 4096) {
6356                         if (bp->flags & ONE_PORT_FLAG)
6357                                 low = 160;
6358                         else {
6359                                 val = bp->dev->mtu;
6360                                 /* (24*1024 + val*4)/256 */
6361                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6362                         }
6363                 } else
6364                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6365                 high = low + 56;        /* 14*1024/256 */
6366         }
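        /* per the inline comments, the thresholds are in 256-byte BRB
         * blocks: the 24KB baseline is 96 blocks, in the large-MTU case
         * each MTU byte adds 4 bytes (rounded up to a block), and 'high'
         * sits 14KB (56 blocks) above 'low'
         */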
6367         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6368         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6369
6370
6371         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6372
6373         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6374         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6375         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6376         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6377
6378         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6379         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6380         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6381         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6382
6383         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6384         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6385
6386         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6387
6388         /* configure PBF to work without PAUSE mtu 9000 */
6389         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6390
6391         /* update threshold */
6392         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6393         /* update init credit */
6394         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6395
6396         /* probe changes */
6397         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6398         msleep(5);
6399         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6400
6401 #ifdef BCM_CNIC
6402         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6403 #endif
6404         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6405         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6406
6407         if (CHIP_IS_E1(bp)) {
6408                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6409                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6410         }
6411         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6412
6413         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6414         /* init aeu_mask_attn_func_0/1:
6415          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6416          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6417          *             bits 4-7 are used for "per vn group attention" */
6418         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6419                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6420
6421         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6422         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6423         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6424         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6425         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6426
6427         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6428
6429         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6430
6431         if (CHIP_IS_E1H(bp)) {
6432                 /* 0x2 disable e1hov, 0x1 enable */
6433                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6434                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6435
6437                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6438                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6439                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6441         }
6442
6443         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6444         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6445
6446         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6447         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6448                 {
6449                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6450
6451                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6452                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6453
6454                 /* The GPIO should be swapped if the swap register is
6455                    set and active */
6456                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6457                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6458
6459                 /* Select function upon port-swap configuration */
6460                 if (port == 0) {
6461                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6462                         aeu_gpio_mask = (swap_val && swap_override) ?
6463                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6464                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6465                 } else {
6466                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6467                         aeu_gpio_mask = (swap_val && swap_override) ?
6468                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6469                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6470                 }
6471                 val = REG_RD(bp, offset);
6472                 /* add GPIO3 to group */
6473                 val |= aeu_gpio_mask;
6474                 REG_WR(bp, offset, val);
6475                 }
6476                 break;
6477
6478         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6479         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6480                 /* add SPIO 5 to group 0 */
6481                 {
6482                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6483                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6484                 val = REG_RD(bp, reg_addr);
6485                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6486                 REG_WR(bp, reg_addr, val);
6487                 }
6488                 break;
6489
6490         default:
6491                 break;
6492         }
6493
6494         bnx2x__link_reset(bp);
6495
6496         return 0;
6497 }
6498
6499 #define ILT_PER_FUNC            (768/2)
6500 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6501 /* the phys address is shifted right 12 bits and a valid bit (1)
6502    is added as the 53rd bit;
6503    then, since this is a wide register (TM),
6504    we split it into two 32-bit writes
6505  */
6506 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6507 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
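/* worked example (hypothetical address): for x = 0x1234567000,
   ONCHIP_ADDR1(x) = 0x01234567 (bits 12..43) and
   ONCHIP_ADDR2(x) = 0x00100000 (only the valid bit, as bits 44+ are zero) */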
6508 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6509 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6510
6511 #ifdef BCM_CNIC
6512 #define CNIC_ILT_LINES          127
6513 #define CNIC_CTX_PER_ILT        16
6514 #else
6515 #define CNIC_ILT_LINES          0
6516 #endif
6517
6518 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6519 {
6520         int reg;
6521
6522         if (CHIP_IS_E1H(bp))
6523                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6524         else /* E1 */
6525                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6526
6527         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6528 }
6529
6530 static int bnx2x_init_func(struct bnx2x *bp)
6531 {
6532         int port = BP_PORT(bp);
6533         int func = BP_FUNC(bp);
6534         u32 addr, val;
6535         int i;
6536
6537         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6538
6539         /* set MSI reconfigure capability */
6540         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6541         val = REG_RD(bp, addr);
6542         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6543         REG_WR(bp, addr, val);
6544
6545         i = FUNC_ILT_BASE(func);
6546
6547         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6548         if (CHIP_IS_E1H(bp)) {
6549                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6550                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6551         } else /* E1 */
6552                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6553                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6554
6555 #ifdef BCM_CNIC
6556         i += 1 + CNIC_ILT_LINES;
6557         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6558         if (CHIP_IS_E1(bp))
6559                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6560         else {
6561                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6562                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6563         }
6564
6565         i++;
6566         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6567         if (CHIP_IS_E1(bp))
6568                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6569         else {
6570                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6571                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6572         }
6573
6574         i++;
6575         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6576         if (CHIP_IS_E1(bp))
6577                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6578         else {
6579                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6580                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6581         }
6582
6583         /* tell the searcher where the T2 table is */
6584         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6585
6586         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6587                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6588
6589         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6590                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6591                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6592
6593         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6594 #endif
6595
6596         if (CHIP_IS_E1H(bp)) {
6597                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6598                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6599                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6600                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6601                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6602                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6603                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6604                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6605                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6606
6607                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6608                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6609         }
6610
6611         /* HC init per function */
6612         if (CHIP_IS_E1H(bp)) {
6613                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6614
6615                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6616                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6617         }
6618         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6619
6620         /* Reset PCIE errors for debug */
6621         REG_WR(bp, 0x2114, 0xffffffff);
6622         REG_WR(bp, 0x2120, 0xffffffff);
6623
6624         return 0;
6625 }
6626
6627 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6628 {
6629         int i, rc = 0;
6630
6631         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6632            BP_FUNC(bp), load_code);
6633
6634         bp->dmae_ready = 0;
6635         mutex_init(&bp->dmae_mutex);
6636         rc = bnx2x_gunzip_init(bp);
6637         if (rc)
6638                 return rc;
6639
6640         switch (load_code) {
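        /* same deliberate fall-through scheme as bnx2x_init_internal():
         * COMMON also runs the PORT and FUNCTION init, PORT also runs
         * FUNCTION
         */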
6641         case FW_MSG_CODE_DRV_LOAD_COMMON:
6642                 rc = bnx2x_init_common(bp);
6643                 if (rc)
6644                         goto init_hw_err;
6645                 /* no break */
6646
6647         case FW_MSG_CODE_DRV_LOAD_PORT:
6648                 bp->dmae_ready = 1;
6649                 rc = bnx2x_init_port(bp);
6650                 if (rc)
6651                         goto init_hw_err;
6652                 /* no break */
6653
6654         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6655                 bp->dmae_ready = 1;
6656                 rc = bnx2x_init_func(bp);
6657                 if (rc)
6658                         goto init_hw_err;
6659                 break;
6660
6661         default:
6662                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6663                 break;
6664         }
6665
6666         if (!BP_NOMCP(bp)) {
6667                 int func = BP_FUNC(bp);
6668
6669                 bp->fw_drv_pulse_wr_seq =
6670                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6671                                  DRV_PULSE_SEQ_MASK);
6672                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6673         }
6674
6675         /* this needs to be done before gunzip end */
6676         bnx2x_zero_def_sb(bp);
6677         for_each_queue(bp, i)
6678                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6679 #ifdef BCM_CNIC
6680         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6681 #endif
6682
6683 init_hw_err:
6684         bnx2x_gunzip_end(bp);
6685
6686         return rc;
6687 }
6688
6689 static void bnx2x_free_mem(struct bnx2x *bp)
6690 {
6691
6692 #define BNX2X_PCI_FREE(x, y, size) \
6693         do { \
6694                 if (x) { \
6695                         pci_free_consistent(bp->pdev, size, x, y); \
6696                         x = NULL; \
6697                         y = 0; \
6698                 } \
6699         } while (0)
6700
6701 #define BNX2X_FREE(x) \
6702         do { \
6703                 if (x) { \
6704                         vfree(x); \
6705                         x = NULL; \
6706                 } \
6707         } while (0)
6708
6709         int i;
6710
6711         /* fastpath */
6712         /* Common */
6713         for_each_queue(bp, i) {
6714
6715                 /* status blocks */
6716                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6717                                bnx2x_fp(bp, i, status_blk_mapping),
6718                                sizeof(struct host_status_block));
6719         }
6720         /* Rx */
6721         for_each_queue(bp, i) {
6722
6723                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6724                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6725                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6726                                bnx2x_fp(bp, i, rx_desc_mapping),
6727                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6728
6729                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6730                                bnx2x_fp(bp, i, rx_comp_mapping),
6731                                sizeof(struct eth_fast_path_rx_cqe) *
6732                                NUM_RCQ_BD);
6733
6734                 /* SGE ring */
6735                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6736                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6737                                bnx2x_fp(bp, i, rx_sge_mapping),
6738                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6739         }
6740         /* Tx */
6741         for_each_queue(bp, i) {
6742
6743                 /* fastpath tx rings: tx_buf tx_desc */
6744                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6745                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6746                                bnx2x_fp(bp, i, tx_desc_mapping),
6747                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6748         }
6749         /* end of fastpath */
6750
6751         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6752                        sizeof(struct host_def_status_block));
6753
6754         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6755                        sizeof(struct bnx2x_slowpath));
6756
6757 #ifdef BCM_CNIC
6758         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6759         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6760         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6761         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6762         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6763                        sizeof(struct host_status_block));
6764 #endif
6765         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6766
6767 #undef BNX2X_PCI_FREE
6768 #undef BNX2X_FREE
6769 }
6770
6771 static int bnx2x_alloc_mem(struct bnx2x *bp)
6772 {
6773
6774 #define BNX2X_PCI_ALLOC(x, y, size) \
6775         do { \
6776                 x = pci_alloc_consistent(bp->pdev, size, y); \
6777                 if (x == NULL) \
6778                         goto alloc_mem_err; \
6779                 memset(x, 0, size); \
6780         } while (0)
6781
6782 #define BNX2X_ALLOC(x, size) \
6783         do { \
6784                 x = vmalloc(size); \
6785                 if (x == NULL) \
6786                         goto alloc_mem_err; \
6787                 memset(x, 0, size); \
6788         } while (0)
6789
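        /*
         * Both macros jump to alloc_mem_err on failure; the bnx2x_free_mem()
         * call there is safe because BNX2X_PCI_FREE() and BNX2X_FREE()
         * skip pointers that are still NULL.
         */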
6790         int i;
6791
6792         /* fastpath */
6793         /* Common */
6794         for_each_queue(bp, i) {
6795                 bnx2x_fp(bp, i, bp) = bp;
6796
6797                 /* status blocks */
6798                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6799                                 &bnx2x_fp(bp, i, status_blk_mapping),
6800                                 sizeof(struct host_status_block));
6801         }
6802         /* Rx */
6803         for_each_queue(bp, i) {
6804
6805                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6806                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6807                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6808                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6809                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6810                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6811
6812                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6813                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6814                                 sizeof(struct eth_fast_path_rx_cqe) *
6815                                 NUM_RCQ_BD);
6816
6817                 /* SGE ring */
6818                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6819                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6820                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6821                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6822                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6823         }
6824         /* Tx */
6825         for_each_queue(bp, i) {
6826
6827                 /* fastpath tx rings: tx_buf tx_desc */
6828                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6829                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6831                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6832                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6833         }
6834         /* end of fastpath */
6835
6836         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6837                         sizeof(struct host_def_status_block));
6838
6839         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6840                         sizeof(struct bnx2x_slowpath));
6841
6842 #ifdef BCM_CNIC
6843         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6844
6845         /* allocate searcher T2 table
6846            we allocate 1/4 of alloc num for T2
6847            (which is not entered into the ILT) */
6848         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6849
6850         /* Initialize T2 (for 1024 connections): chain each 64-byte entry to the next via its DMA address */
6851         for (i = 0; i < 16*1024; i += 64)
6852                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6853
6854         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6855         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6856
6857         /* QM queues (128*MAX_CONN) */
6858         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6859
6860         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6861                         sizeof(struct host_status_block));
6862 #endif
6863
6864         /* Slow path ring */
6865         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6866
6867         return 0;
6868
6869 alloc_mem_err:
6870         bnx2x_free_mem(bp);
6871         return -ENOMEM;
6872
6873 #undef BNX2X_PCI_ALLOC
6874 #undef BNX2X_ALLOC
6875 }
6876
6877 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6878 {
6879         int i;
6880
6881         for_each_queue(bp, i) {
6882                 struct bnx2x_fastpath *fp = &bp->fp[i];
6883
6884                 u16 bd_cons = fp->tx_bd_cons;
6885                 u16 sw_prod = fp->tx_pkt_prod;
6886                 u16 sw_cons = fp->tx_pkt_cons;
6887
6888                 while (sw_cons != sw_prod) {
6889                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6890                         sw_cons++;
6891                 }
6892         }
6893 }
6894
6895 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6896 {
6897         int i, j;
6898
6899         for_each_queue(bp, j) {
6900                 struct bnx2x_fastpath *fp = &bp->fp[j];
6901
6902                 for (i = 0; i < NUM_RX_BD; i++) {
6903                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6904                         struct sk_buff *skb = rx_buf->skb;
6905
6906                         if (skb == NULL)
6907                                 continue;
6908
6909                         pci_unmap_single(bp->pdev,
6910                                          pci_unmap_addr(rx_buf, mapping),
6911                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6912
6913                         rx_buf->skb = NULL;
6914                         dev_kfree_skb(skb);
6915                 }
6916                 if (!fp->disable_tpa)
6917                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6918                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6919                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6920         }
6921 }
6922
6923 static void bnx2x_free_skbs(struct bnx2x *bp)
6924 {
6925         bnx2x_free_tx_skbs(bp);
6926         bnx2x_free_rx_skbs(bp);
6927 }
6928
6929 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6930 {
6931         int i, offset = 1;
6932
6933         free_irq(bp->msix_table[0].vector, bp->dev);
6934         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6935            bp->msix_table[0].vector);
6936
6937 #ifdef BCM_CNIC
6938         offset++;
6939 #endif
6940         for_each_queue(bp, i) {
6941                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6942                    "state %x\n", i, bp->msix_table[i + offset].vector,
6943                    bnx2x_fp(bp, i, state));
6944
6945                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6946         }
6947 }
6948
6949 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6950 {
6951         if (bp->flags & USING_MSIX_FLAG) {
6952                 if (!disable_only)
6953                         bnx2x_free_msix_irqs(bp);
6954                 pci_disable_msix(bp->pdev);
6955                 bp->flags &= ~USING_MSIX_FLAG;
6956
6957         } else if (bp->flags & USING_MSI_FLAG) {
6958                 if (!disable_only)
6959                         free_irq(bp->pdev->irq, bp->dev);
6960                 pci_disable_msi(bp->pdev);
6961                 bp->flags &= ~USING_MSI_FLAG;
6962
6963         } else if (!disable_only)
6964                 free_irq(bp->pdev->irq, bp->dev);
6965 }
6966
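/*
 * MSI-X table layout: entry 0 carries the slowpath (default status block)
 * interrupt, entry 1 is reserved for CNIC when BCM_CNIC is defined, and
 * the remaining entries serve one fastpath queue each - 'offset' accounts
 * for the non-fastpath entries, so queue i uses msix_table[i + offset].
 */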
6967 static int bnx2x_enable_msix(struct bnx2x *bp)
6968 {
6969         int i, rc, offset = 1;
6970         int igu_vec = 0;
6971
6972         bp->msix_table[0].entry = igu_vec;
6973         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6974
6975 #ifdef BCM_CNIC
6976         igu_vec = BP_L_ID(bp) + offset;
6977         bp->msix_table[1].entry = igu_vec;
6978         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6979         offset++;
6980 #endif
6981         for_each_queue(bp, i) {
6982                 igu_vec = BP_L_ID(bp) + offset + i;
6983                 bp->msix_table[i + offset].entry = igu_vec;
6984                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6985                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6986         }
6987
6988         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6989                              BNX2X_NUM_QUEUES(bp) + offset);
6990         if (rc) {
6991                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6992                 return rc;
6993         }
6994
6995         bp->flags |= USING_MSIX_FLAG;
6996
6997         return 0;
6998 }
6999
7000 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7001 {
7002         int i, rc, offset = 1;
7003
7004         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7005                          bp->dev->name, bp->dev);
7006         if (rc) {
7007                 BNX2X_ERR("request sp irq failed\n");
7008                 return -EBUSY;
7009         }
7010
7011 #ifdef BCM_CNIC
7012         offset++;
7013 #endif
7014         for_each_queue(bp, i) {
7015                 struct bnx2x_fastpath *fp = &bp->fp[i];
7016                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7017                          bp->dev->name, i);
7018
7019                 rc = request_irq(bp->msix_table[i + offset].vector,
7020                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7021                 if (rc) {
7022                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7023                         bnx2x_free_msix_irqs(bp);
7024                         return -EBUSY;
7025                 }
7026
7027                 fp->state = BNX2X_FP_STATE_IRQ;
7028         }
7029
7030         i = BNX2X_NUM_QUEUES(bp);
7031         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
7032                     bp->msix_table[0].vector,
7033                     0, bp->msix_table[offset].vector,
7034                     i - 1, bp->msix_table[offset + i - 1].vector);
7035
7036         return 0;
7037 }
7038
7039 static int bnx2x_enable_msi(struct bnx2x *bp)
7040 {
7041         int rc;
7042
7043         rc = pci_enable_msi(bp->pdev);
7044         if (rc) {
7045                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7046                 return -1;
7047         }
7048         bp->flags |= USING_MSI_FLAG;
7049
7050         return 0;
7051 }
7052
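/*
 * An MSI vector is never shared with another device, so IRQF_SHARED is
 * only needed for legacy INTx.
 */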
7053 static int bnx2x_req_irq(struct bnx2x *bp)
7054 {
7055         unsigned long flags;
7056         int rc;
7057
7058         if (bp->flags & USING_MSI_FLAG)
7059                 flags = 0;
7060         else
7061                 flags = IRQF_SHARED;
7062
7063         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7064                          bp->dev->name, bp->dev);
7065         if (!rc)
7066                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7067
7068         return rc;
7069 }
7070
7071 static void bnx2x_napi_enable(struct bnx2x *bp)
7072 {
7073         int i;
7074
7075         for_each_queue(bp, i)
7076                 napi_enable(&bnx2x_fp(bp, i, napi));
7077 }
7078
7079 static void bnx2x_napi_disable(struct bnx2x *bp)
7080 {
7081         int i;
7082
7083         for_each_queue(bp, i)
7084                 napi_disable(&bnx2x_fp(bp, i, napi));
7085 }
7086
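/*
 * bp->intr_sem acts as an interrupt-disable count: the interface is only
 * (re)started once atomic_dec_and_test() brings it down to zero.
 */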
7087 static void bnx2x_netif_start(struct bnx2x *bp)
7088 {
7089         int intr_sem;
7090
7091         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7092         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7093
7094         if (intr_sem) {
7095                 if (netif_running(bp->dev)) {
7096                         bnx2x_napi_enable(bp);
7097                         bnx2x_int_enable(bp);
7098                         if (bp->state == BNX2X_STATE_OPEN)
7099                                 netif_tx_wake_all_queues(bp->dev);
7100                 }
7101         }
7102 }
7103
7104 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7105 {
7106         bnx2x_int_disable_sync(bp, disable_hw);
7107         bnx2x_napi_disable(bp);
7108         netif_tx_disable(bp->dev);
7109 }
7110
7111 /*
7112  * Init service functions
7113  */
7114
7115 /**
7116  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7117  *
7118  * @param bp driver descriptor
7119  * @param set set or clear an entry (1 or 0)
7120  * @param mac pointer to a buffer containing a MAC
7121  * @param cl_bit_vec bit vector of clients to register a MAC for
7122  * @param cam_offset offset in a CAM to use
7123  * @param with_bcast set broadcast MAC as well
7124  */
7125 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7126                                       u32 cl_bit_vec, u8 cam_offset,
7127                                       u8 with_bcast)
7128 {
7129         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7130         int port = BP_PORT(bp);
7131
7132         /* CAM allocation
7133          * unicasts 0-31:port0 32-63:port1
7134          * multicast 64-127:port0 128-191:port1
7135          */
7136         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7137         config->hdr.offset = cam_offset;
7138         config->hdr.client_id = 0xff;
7139         config->hdr.reserved1 = 0;
7140
7141         /* primary MAC */
7142         config->config_table[0].cam_entry.msb_mac_addr =
7143                                         swab16(*(u16 *)&mac[0]);
7144         config->config_table[0].cam_entry.middle_mac_addr =
7145                                         swab16(*(u16 *)&mac[2]);
7146         config->config_table[0].cam_entry.lsb_mac_addr =
7147                                         swab16(*(u16 *)&mac[4]);
7148         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7149         if (set)
7150                 config->config_table[0].target_table_entry.flags = 0;
7151         else
7152                 CAM_INVALIDATE(config->config_table[0]);
7153         config->config_table[0].target_table_entry.clients_bit_vector =
7154                                                 cpu_to_le32(cl_bit_vec);
7155         config->config_table[0].target_table_entry.vlan_id = 0;
7156
7157         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7158            (set ? "setting" : "clearing"),
7159            config->config_table[0].cam_entry.msb_mac_addr,
7160            config->config_table[0].cam_entry.middle_mac_addr,
7161            config->config_table[0].cam_entry.lsb_mac_addr);
7162
7163         /* broadcast */
7164         if (with_bcast) {
7165                 config->config_table[1].cam_entry.msb_mac_addr =
7166                         cpu_to_le16(0xffff);
7167                 config->config_table[1].cam_entry.middle_mac_addr =
7168                         cpu_to_le16(0xffff);
7169                 config->config_table[1].cam_entry.lsb_mac_addr =
7170                         cpu_to_le16(0xffff);
7171                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7172                 if (set)
7173                         config->config_table[1].target_table_entry.flags =
7174                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7175                 else
7176                         CAM_INVALIDATE(config->config_table[1]);
7177                 config->config_table[1].target_table_entry.clients_bit_vector =
7178                                                         cpu_to_le32(cl_bit_vec);
7179                 config->config_table[1].target_table_entry.vlan_id = 0;
7180         }
7181
7182         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7183                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7184                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7185 }
7186
7187 /**
7188  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7189  *
7190  * @param bp driver descriptor
7191  * @param set set or clear an entry (1 or 0)
7192  * @param mac pointer to a buffer containing a MAC
7193  * @param cl_bit_vec bit vector of clients to register a MAC for
7194  * @param cam_offset offset in a CAM to use
7195  */
7196 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7197                                        u32 cl_bit_vec, u8 cam_offset)
7198 {
7199         struct mac_configuration_cmd_e1h *config =
7200                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7201
7202         config->hdr.length = 1;
7203         config->hdr.offset = cam_offset;
7204         config->hdr.client_id = 0xff;
7205         config->hdr.reserved1 = 0;
7206
7207         /* primary MAC */
7208         config->config_table[0].msb_mac_addr =
7209                                         swab16(*(u16 *)&mac[0]);
7210         config->config_table[0].middle_mac_addr =
7211                                         swab16(*(u16 *)&mac[2]);
7212         config->config_table[0].lsb_mac_addr =
7213                                         swab16(*(u16 *)&mac[4]);
7214         config->config_table[0].clients_bit_vector =
7215                                         cpu_to_le32(cl_bit_vec);
7216         config->config_table[0].vlan_id = 0;
7217         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7218         if (set)
7219                 config->config_table[0].flags = BP_PORT(bp);
7220         else
7221                 config->config_table[0].flags =
7222                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7223
7224         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7225            (set ? "setting" : "clearing"),
7226            config->config_table[0].msb_mac_addr,
7227            config->config_table[0].middle_mac_addr,
7228            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7229
7230         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7231                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7232                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7233 }
7234
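/*
 * Poll *state_p, which bnx2x_sp_event() updates on ramrod completion,
 * until it reaches 'state', sleeping 1 ms per iteration for up to ~5
 * seconds.  With 'poll' set, the RX completion rings are serviced
 * directly here instead of relying on the interrupt path.
 */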
7235 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7236                              int *state_p, int poll)
7237 {
7238         /* can take a while if any port is running */
7239         int cnt = 5000;
7240
7241         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7242            poll ? "polling" : "waiting", state, idx);
7243
7244         might_sleep();
7245         while (cnt--) {
7246                 if (poll) {
7247                         bnx2x_rx_int(bp->fp, 10);
7248                         /* if index is different from 0
7249                          * the reply for some commands will
7250                          * be on the non default queue
7251                          */
7252                         if (idx)
7253                                 bnx2x_rx_int(&bp->fp[idx], 10);
7254                 }
7255
7256                 mb(); /* state is changed by bnx2x_sp_event() */
7257                 if (*state_p == state) {
7258 #ifdef BNX2X_STOP_ON_ERROR
7259                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7260 #endif
7261                         return 0;
7262                 }
7263
7264                 msleep(1);
7265
7266                 if (bp->panic)
7267                         return -EIO;
7268         }
7269
7270         /* timeout! */
7271         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7272                   poll ? "polling" : "waiting", state, idx);
7273 #ifdef BNX2X_STOP_ON_ERROR
7274         bnx2x_panic();
7275 #endif
7276
7277         return -EBUSY;
7278 }
7279
7280 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7281 {
7282         bp->set_mac_pending++;
7283         smp_wmb();
7284
7285         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7286                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7287
7288         /* Wait for a completion */
7289         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7290 }
7291
7292 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7293 {
7294         bp->set_mac_pending++;
7295         smp_wmb();
7296
7297         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7298                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7299                                   1);
7300
7301         /* Wait for a completion */
7302         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7303 }
7304
7305 #ifdef BCM_CNIC
7306 /**
7307  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7308  * MAC(s). This function will wait until the ramrod completion
7309  * returns.
7310  *
7311  * @param bp driver handle
7312  * @param set set or clear the CAM entry
7313  *
7314  * @return 0 on success, -ENODEV if ramrod doesn't return.
7315  */
7316 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7317 {
7318         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7319
7320         bp->set_mac_pending++;
7321         smp_wmb();
7322
7323         /* Send a SET_MAC ramrod */
7324         if (CHIP_IS_E1(bp))
7325                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7326                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7327                                   1);
7328         else
7329                 /* CAM allocation for E1H
7330                  * unicasts: by func number
7331                  * multicast: 20+FUNC*20, 20 each
7332                  */
7333                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7334                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7335
7336         /* Wait for a completion when setting */
7337         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7338
7339         return 0;
7340 }
7341 #endif
7342
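/*
 * The leading client is opened with a PORT_SETUP ramrod; its completion
 * moves bp->state to BNX2X_STATE_OPEN, which bnx2x_wait_ramrod() polls
 * for below.
 */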
7343 static int bnx2x_setup_leading(struct bnx2x *bp)
7344 {
7345         int rc;
7346
7347         /* reset IGU state */
7348         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7349
7350         /* SETUP ramrod */
7351         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7352
7353         /* Wait for completion */
7354         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7355
7356         return rc;
7357 }
7358
7359 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7360 {
7361         struct bnx2x_fastpath *fp = &bp->fp[index];
7362
7363         /* reset IGU state */
7364         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7365
7366         /* SETUP ramrod */
7367         fp->state = BNX2X_FP_STATE_OPENING;
7368         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7369                       fp->cl_id, 0);
7370
7371         /* Wait for completion */
7372         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7373                                  &(fp->state), 0);
7374 }
7375
7376 static int bnx2x_poll(struct napi_struct *napi, int budget);
7377
7378 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7379 {
7380
7381         switch (bp->multi_mode) {
7382         case ETH_RSS_MODE_DISABLED:
7383                 bp->num_queues = 1;
7384                 break;
7385
7386         case ETH_RSS_MODE_REGULAR:
7387                 if (num_queues)
7388                         bp->num_queues = min_t(u32, num_queues,
7389                                                   BNX2X_MAX_QUEUES(bp));
7390                 else
7391                         bp->num_queues = min_t(u32, num_online_cpus(),
7392                                                   BNX2X_MAX_QUEUES(bp));
7393                 break;
7394
7396         default:
7397                 bp->num_queues = 1;
7398                 break;
7399         }
7400 }
7401
7402 static int bnx2x_set_num_queues(struct bnx2x *bp)
7403 {
7404         int rc = 0;
7405
7406         switch (int_mode) {
7407         case INT_MODE_INTx:
7408         case INT_MODE_MSI:
7409                 bp->num_queues = 1;
7410                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7411                 break;
7412
7413         case INT_MODE_MSIX:
7414         default:
7415                 /* Set number of queues according to bp->multi_mode value */
7416                 bnx2x_set_num_queues_msix(bp);
7417
7418                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7419                    bp->num_queues);
7420
7421                 /* if we can't use MSI-X we only need one fp,
7422                  * so try to enable MSI-X with the requested number of fp's
7423                  * and fall back to MSI or legacy INTx with one fp
7424                  */
7425                 rc = bnx2x_enable_msix(bp);
7426                 if (rc)
7427                         /* failed to enable MSI-X */
7428                         bp->num_queues = 1;
7429                 break;
7430         }
7431         bp->dev->real_num_tx_queues = bp->num_queues;
7432         return rc;
7433 }
7434
7435 #ifdef BCM_CNIC
7436 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7437 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7438 #endif
7439
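/*
 * Bring the device up in stages: queue and IRQ setup, memory allocation,
 * NAPI, the LOAD_REQ handshake with the MCP (or a local load_count vote
 * when no MCP is present), HW and NIC init, SETUP ramrods for the leading
 * and non-default clients, MAC programming, and finally RX mode and the
 * periodic timer.  The load_errorN labels unwind in reverse order.
 */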
7440 /* must be called with rtnl_lock */
7441 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7442 {
7443         u32 load_code;
7444         int i, rc;
7445
7446 #ifdef BNX2X_STOP_ON_ERROR
7447         if (unlikely(bp->panic))
7448                 return -EPERM;
7449 #endif
7450
7451         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7452
7453         rc = bnx2x_set_num_queues(bp);
7454
7455         if (bnx2x_alloc_mem(bp)) {
7456                 bnx2x_free_irq(bp, true);
7457                 return -ENOMEM;
7458         }
7459
7460         for_each_queue(bp, i)
7461                 bnx2x_fp(bp, i, disable_tpa) =
7462                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7463
7464         for_each_queue(bp, i)
7465                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7466                                bnx2x_poll, 128);
7467
7468         bnx2x_napi_enable(bp);
7469
7470         if (bp->flags & USING_MSIX_FLAG) {
7471                 rc = bnx2x_req_msix_irqs(bp);
7472                 if (rc) {
7473                         bnx2x_free_irq(bp, true);
7474                         goto load_error1;
7475                 }
7476         } else {
7477                 /* Fall back to INTx if MSI-X could not be enabled due to
7478                    lack of memory (in bnx2x_set_num_queues()) */
7479                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7480                         bnx2x_enable_msi(bp);
7481                 bnx2x_ack_int(bp);
7482                 rc = bnx2x_req_irq(bp);
7483                 if (rc) {
7484                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7485                         bnx2x_free_irq(bp, true);
7486                         goto load_error1;
7487                 }
7488                 if (bp->flags & USING_MSI_FLAG) {
7489                         bp->dev->irq = bp->pdev->irq;
7490                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7491                                     bp->pdev->irq);
7492                 }
7493         }
7494
7495         /* Send LOAD_REQUEST command to MCP.
7496            The response indicates which type of LOAD was granted:
7497            if it is the first port to be initialized,
7498            the common blocks should be initialized as well; otherwise not.
7499         */
7500         if (!BP_NOMCP(bp)) {
7501                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7502                 if (!load_code) {
7503                         BNX2X_ERR("MCP response failure, aborting\n");
7504                         rc = -EBUSY;
7505                         goto load_error2;
7506                 }
7507                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7508                         rc = -EBUSY; /* other port in diagnostic mode */
7509                         goto load_error2;
7510                 }
7511
7512         } else {
7513                 int port = BP_PORT(bp);
7514
7515                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7516                    load_count[0], load_count[1], load_count[2]);
7517                 load_count[0]++;
7518                 load_count[1 + port]++;
7519                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7520                    load_count[0], load_count[1], load_count[2]);
7521                 if (load_count[0] == 1)
7522                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7523                 else if (load_count[1 + port] == 1)
7524                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7525                 else
7526                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7527         }
7528
7529         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7530             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7531                 bp->port.pmf = 1;
7532         else
7533                 bp->port.pmf = 0;
7534         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7535
7536         /* Initialize HW */
7537         rc = bnx2x_init_hw(bp, load_code);
7538         if (rc) {
7539                 BNX2X_ERR("HW init failed, aborting\n");
7540                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7541                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7542                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7543                 goto load_error2;
7544         }
7545
7546         /* Setup NIC internals and enable interrupts */
7547         bnx2x_nic_init(bp, load_code);
7548
7549         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7550             (bp->common.shmem2_base))
7551                 SHMEM2_WR(bp, dcc_support,
7552                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7553                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7554
7555         /* Send LOAD_DONE command to MCP */
7556         if (!BP_NOMCP(bp)) {
7557                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7558                 if (!load_code) {
7559                         BNX2X_ERR("MCP response failure, aborting\n");
7560                         rc = -EBUSY;
7561                         goto load_error3;
7562                 }
7563         }
7564
7565         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7566
7567         rc = bnx2x_setup_leading(bp);
7568         if (rc) {
7569                 BNX2X_ERR("Setup leading failed!\n");
7570 #ifndef BNX2X_STOP_ON_ERROR
7571                 goto load_error3;
7572 #else
7573                 bp->panic = 1;
7574                 return -EBUSY;
7575 #endif
7576         }
7577
7578         if (CHIP_IS_E1H(bp))
7579                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7580                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7581                         bp->flags |= MF_FUNC_DIS;
7582                 }
7583
7584         if (bp->state == BNX2X_STATE_OPEN) {
7585 #ifdef BCM_CNIC
7586                 /* Enable Timer scan */
7587                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7588 #endif
7589                 for_each_nondefault_queue(bp, i) {
7590                         rc = bnx2x_setup_multi(bp, i);
7591                         if (rc)
7592 #ifdef BCM_CNIC
7593                                 goto load_error4;
7594 #else
7595                                 goto load_error3;
7596 #endif
7597                 }
7598
7599                 if (CHIP_IS_E1(bp))
7600                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7601                 else
7602                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7603 #ifdef BCM_CNIC
7604                 /* Set iSCSI L2 MAC */
7605                 mutex_lock(&bp->cnic_mutex);
7606                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7607                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7608                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7609                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7610                                       CNIC_SB_ID(bp));
7611                 }
7612                 mutex_unlock(&bp->cnic_mutex);
7613 #endif
7614         }
7615
7616         if (bp->port.pmf)
7617                 bnx2x_initial_phy_init(bp, load_mode);
7618
7619         /* Start fast path */
7620         switch (load_mode) {
7621         case LOAD_NORMAL:
7622                 if (bp->state == BNX2X_STATE_OPEN) {
7623                         /* Tx queues should only be re-enabled */
7624                         netif_tx_wake_all_queues(bp->dev);
7625                 }
7626                 /* Initialize the receive filter. */
7627                 bnx2x_set_rx_mode(bp->dev);
7628                 break;
7629
7630         case LOAD_OPEN:
7631                 netif_tx_start_all_queues(bp->dev);
7632                 if (bp->state != BNX2X_STATE_OPEN)
7633                         netif_tx_disable(bp->dev);
7634                 /* Initialize the receive filter. */
7635                 bnx2x_set_rx_mode(bp->dev);
7636                 break;
7637
7638         case LOAD_DIAG:
7639                 /* Initialize the receive filter. */
7640                 bnx2x_set_rx_mode(bp->dev);
7641                 bp->state = BNX2X_STATE_DIAG;
7642                 break;
7643
7644         default:
7645                 break;
7646         }
7647
7648         if (!bp->port.pmf)
7649                 bnx2x__link_status_update(bp);
7650
7651         /* start the timer */
7652         mod_timer(&bp->timer, jiffies + bp->current_interval);
7653
7654 #ifdef BCM_CNIC
7655         bnx2x_setup_cnic_irq_info(bp);
7656         if (bp->state == BNX2X_STATE_OPEN)
7657                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7658 #endif
7659
7660         return 0;
7661
7662 #ifdef BCM_CNIC
7663 load_error4:
7664         /* Disable Timer scan */
7665         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7666 #endif
7667 load_error3:
7668         bnx2x_int_disable_sync(bp, 1);
7669         if (!BP_NOMCP(bp)) {
7670                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7671                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7672         }
7673         bp->port.pmf = 0;
7674         /* Free SKBs, SGEs, TPA pool and driver internals */
7675         bnx2x_free_skbs(bp);
7676         for_each_queue(bp, i)
7677                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7678 load_error2:
7679         /* Release IRQs */
7680         bnx2x_free_irq(bp, false);
7681 load_error1:
7682         bnx2x_napi_disable(bp);
7683         for_each_queue(bp, i)
7684                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7685         bnx2x_free_mem(bp);
7686
7687         return rc;
7688 }
7689
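/*
 * A non-leading client is closed in two ramrod steps: ETH_HALT stops the
 * connection, then ETH_CFC_DEL releases its CFC entry; each step is
 * polled for completion via bnx2x_wait_ramrod().
 */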
7690 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7691 {
7692         struct bnx2x_fastpath *fp = &bp->fp[index];
7693         int rc;
7694
7695         /* halt the connection */
7696         fp->state = BNX2X_FP_STATE_HALTING;
7697         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7698
7699         /* Wait for completion */
7700         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7701                                &(fp->state), 1);
7702         if (rc) /* timeout */
7703                 return rc;
7704
7705         /* delete cfc entry */
7706         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7707
7708         /* Wait for completion */
7709         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7710                                &(fp->state), 1);
7711         return rc;
7712 }
7713
7714 static int bnx2x_stop_leading(struct bnx2x *bp)
7715 {
7716         __le16 dsb_sp_prod_idx;
7717         /* if the other port is handling traffic,
7718            this can take a lot of time */
7719         int cnt = 500;
7720         int rc;
7721
7722         might_sleep();
7723
7724         /* Send HALT ramrod */
7725         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7726         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7727
7728         /* Wait for completion */
7729         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7730                                &(bp->fp[0].state), 1);
7731         if (rc) /* timeout */
7732                 return rc;
7733
7734         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7735
7736         /* Send PORT_DELETE ramrod */
7737         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7738
7739         /* Wait for the completion to arrive on the default status block;
7740            we are going to reset the chip anyway,
7741            so there is not much to do if this times out
7742          */
7743         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7744                 if (!cnt) {
7745                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7746                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7747                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7748 #ifdef BNX2X_STOP_ON_ERROR
7749                         bnx2x_panic();
7750 #endif
7751                         rc = -EBUSY;
7752                         break;
7753                 }
7754                 cnt--;
7755                 msleep(1);
7756                 rmb(); /* Refresh the dsb_sp_prod */
7757         }
7758         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7759         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7760
7761         return rc;
7762 }
7763
7764 static void bnx2x_reset_func(struct bnx2x *bp)
7765 {
7766         int port = BP_PORT(bp);
7767         int func = BP_FUNC(bp);
7768         int base, i;
7769
7770         /* Configure IGU */
7771         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7772         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7773
7774 #ifdef BCM_CNIC
7775         /* Disable Timer scan */
7776         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7777         /*
7778          * Wait for at least 10ms and up to 2 second for the timers scan to
7779          * Wait for at least 10 ms and up to 2 seconds for the timers scan to
7780          */
7781         for (i = 0; i < 200; i++) {
7782                 msleep(10);
7783                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7784                         break;
7785         }
7786 #endif
7787         /* Clear ILT */
7788         base = FUNC_ILT_BASE(func);
7789         for (i = base; i < base + ILT_PER_FUNC; i++)
7790                 bnx2x_ilt_wr(bp, i, 0);
7791 }
7792
7793 static void bnx2x_reset_port(struct bnx2x *bp)
7794 {
7795         int port = BP_PORT(bp);
7796         u32 val;
7797
7798         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7799
7800         /* Do not rcv packets to BRB */
7801         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7802         /* Do not direct rcv packets that are not for MCP to the BRB */
7803         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7804                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7805
7806         /* Configure AEU */
7807         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7808
7809         msleep(100);
7810         /* Check for BRB port occupancy */
7811         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7812         if (val)
7813                 DP(NETIF_MSG_IFDOWN,
7814                    "BRB1 is not empty  %d blocks are occupied\n", val);
7815
7816         /* TODO: Close Doorbell port? */
7817 }
7818
7819 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7820 {
7821         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7822            BP_FUNC(bp), reset_code);
7823
7824         switch (reset_code) {
7825         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7826                 bnx2x_reset_port(bp);
7827                 bnx2x_reset_func(bp);
7828                 bnx2x_reset_common(bp);
7829                 break;
7830
7831         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7832                 bnx2x_reset_port(bp);
7833                 bnx2x_reset_func(bp);
7834                 break;
7835
7836         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7837                 bnx2x_reset_func(bp);
7838                 break;
7839
7840         default:
7841                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7842                 break;
7843         }
7844 }
7845
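/*
 * Unload mirrors bnx2x_nic_load(): traffic is drained and RX filters
 * dropped first, the connections are closed via ramrods, the MCP is asked
 * for the UNLOAD variant (or load_count is consulted when there is no
 * MCP), the chip is reset and all driver resources are freed.
 */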
7846 /* must be called with rtnl_lock */
7847 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7848 {
7849         int port = BP_PORT(bp);
7850         u32 reset_code = 0;
7851         int i, cnt, rc;
7852
7853 #ifdef BCM_CNIC
7854         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7855 #endif
7856         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7857
7858         /* Set "drop all" */
7859         bp->rx_mode = BNX2X_RX_MODE_NONE;
7860         bnx2x_set_storm_rx_mode(bp);
7861
7862         /* Disable HW interrupts, NAPI and Tx */
7863         bnx2x_netif_stop(bp, 1);
7864
7865         del_timer_sync(&bp->timer);
7866         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7867                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7868         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7869
7870         /* Release IRQs */
7871         bnx2x_free_irq(bp, false);
7872
7873         /* Wait until tx fastpath tasks complete */
7874         for_each_queue(bp, i) {
7875                 struct bnx2x_fastpath *fp = &bp->fp[i];
7876
7877                 cnt = 1000;
7878                 while (bnx2x_has_tx_work_unload(fp)) {
7879
7880                         bnx2x_tx_int(fp);
7881                         if (!cnt) {
7882                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7883                                           i);
7884 #ifdef BNX2X_STOP_ON_ERROR
7885                                 bnx2x_panic();
7886                                 return -EBUSY;
7887 #else
7888                                 break;
7889 #endif
7890                         }
7891                         cnt--;
7892                         msleep(1);
7893                 }
7894         }
7895         /* Give HW time to discard old tx messages */
7896         msleep(1);
7897
7898         if (CHIP_IS_E1(bp)) {
7899                 struct mac_configuration_cmd *config =
7900                                                 bnx2x_sp(bp, mcast_config);
7901
7902                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7903
7904                 for (i = 0; i < config->hdr.length; i++)
7905                         CAM_INVALIDATE(config->config_table[i]);
7906
7907                 config->hdr.length = i;
7908                 if (CHIP_REV_IS_SLOW(bp))
7909                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7910                 else
7911                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7912                 config->hdr.client_id = bp->fp->cl_id;
7913                 config->hdr.reserved1 = 0;
7914
7915                 bp->set_mac_pending++;
7916                 smp_wmb();
7917
7918                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7919                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7920                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7921
7922         } else { /* E1H */
7923                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7924
7925                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7926
7927                 for (i = 0; i < MC_HASH_SIZE; i++)
7928                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7929
7930                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7931         }
7932 #ifdef BCM_CNIC
7933         /* Clear iSCSI L2 MAC */
7934         mutex_lock(&bp->cnic_mutex);
7935         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7936                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7937                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7938         }
7939         mutex_unlock(&bp->cnic_mutex);
7940 #endif
7941
7942         if (unload_mode == UNLOAD_NORMAL)
7943                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7944
7945         else if (bp->flags & NO_WOL_FLAG)
7946                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7947
7948         else if (bp->wol) {
7949                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7950                 u8 *mac_addr = bp->dev->dev_addr;
7951                 u32 val;
7952                 /* The MAC address is written to entries 1-4 to
7953                    preserve entry 0, which is used by the PMF */
7954                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7955
7956                 val = (mac_addr[0] << 8) | mac_addr[1];
7957                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7958
7959                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7960                       (mac_addr[4] << 8) | mac_addr[5];
7961                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7962
7963                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7964
7965         } else
7966                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7967
7968         /* Close multi and leading connections;
7969            completions for ramrods are collected in a synchronous way */
7970         for_each_nondefault_queue(bp, i)
7971                 if (bnx2x_stop_multi(bp, i))
7972                         goto unload_error;
7973
7974         rc = bnx2x_stop_leading(bp);
7975         if (rc) {
7976                 BNX2X_ERR("Stop leading failed!\n");
7977 #ifdef BNX2X_STOP_ON_ERROR
7978                 return -EBUSY;
7979 #else
7980                 goto unload_error;
7981 #endif
7982         }
7983
7984 unload_error:
7985         if (!BP_NOMCP(bp))
7986                 reset_code = bnx2x_fw_command(bp, reset_code);
7987         else {
7988                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7989                    load_count[0], load_count[1], load_count[2]);
7990                 load_count[0]--;
7991                 load_count[1 + port]--;
7992                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7993                    load_count[0], load_count[1], load_count[2]);
7994                 if (load_count[0] == 0)
7995                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7996                 else if (load_count[1 + port] == 0)
7997                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7998                 else
7999                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8000         }
8001
8002         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8003             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8004                 bnx2x__link_reset(bp);
8005
8006         /* Reset the chip */
8007         bnx2x_reset_chip(bp, reset_code);
8008
8009         /* Report UNLOAD_DONE to MCP */
8010         if (!BP_NOMCP(bp))
8011                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8012
8013         bp->port.pmf = 0;
8014
8015         /* Free SKBs, SGEs, TPA pool and driver internals */
8016         bnx2x_free_skbs(bp);
8017         for_each_queue(bp, i)
8018                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8019         for_each_queue(bp, i)
8020                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8021         bnx2x_free_mem(bp);
8022
8023         bp->state = BNX2X_STATE_CLOSED;
8024
8025         netif_carrier_off(bp->dev);
8026
8027         return 0;
8028 }
8029
8030 static void bnx2x_reset_task(struct work_struct *work)
8031 {
8032         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8033
8034 #ifdef BNX2X_STOP_ON_ERROR
8035         BNX2X_ERR("reset task called but STOP_ON_ERROR is defined,"
8036                   " so the reset is not done to allow a debug dump;\n"
8037                   " you will need to reboot when done\n");
8038         return;
8039 #endif
8040
8041         rtnl_lock();
8042
8043         if (!netif_running(bp->dev))
8044                 goto reset_task_exit;
8045
8046         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8047         bnx2x_nic_load(bp, LOAD_NORMAL);
8048
8049 reset_task_exit:
8050         rtnl_unlock();
8051 }
8052
8053 /* end of nic load/unload */
8054
8055 /* ethtool_ops */
8056
8057 /*
8058  * Init service functions
8059  */
8060
8061 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8062 {
8063         switch (func) {
8064         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8065         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8066         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8067         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8068         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8069         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8070         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8071         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8072         default:
8073                 BNX2X_ERR("Unsupported function index: %d\n", func);
8074                 return (u32)(-1);
8075         }
8076 }
8077
8078 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8079 {
8080         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8081
8082         /* Flush all outstanding writes */
8083         mmiowb();
8084
8085         /* Pretend to be function 0 */
8086         REG_WR(bp, reg, 0);
8087         /* Flush the GRC transaction (in the chip) */
8088         new_val = REG_RD(bp, reg);
8089         if (new_val != 0) {
8090                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8091                           new_val);
8092                 BUG();
8093         }
8094
8095         /* From now we are in the "like-E1" mode */
8096         bnx2x_int_disable(bp);
8097
8098         /* Flush all outstanding writes */
8099         mmiowb();
8100
8101         /* Restore the original function settings */
8102         REG_WR(bp, reg, orig_func);
8103         new_val = REG_RD(bp, reg);
8104         if (new_val != orig_func) {
8105                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8106                           orig_func, new_val);
8107                 BUG();
8108         }
8109 }
8110
8111 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8112 {
8113         if (CHIP_IS_E1H(bp))
8114                 bnx2x_undi_int_disable_e1h(bp, func);
8115         else
8116                 bnx2x_int_disable(bp);
8117 }
8118
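/*
 * If a pre-boot UNDI driver left the device initialized - detected by the
 * normal-doorbell CID offset still being 0x7 - shut it down cleanly:
 * complete its MCP unload handshake (on both ports if needed), block new
 * RX traffic, reset the device while preserving the NIG port-swap straps,
 * then restore our own func and fw_seq.
 */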
8119 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8120 {
8121         u32 val;
8122
8123         /* Check if there is any driver already loaded */
8124         val = REG_RD(bp, MISC_REG_UNPREPARED);
8125         if (val == 0x1) {
8126                 /* Check if it is the UNDI driver
8127                  * UNDI driver initializes CID offset for normal bell to 0x7
8128                  */
8129                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8130                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8131                 if (val == 0x7) {
8132                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8133                         /* save our func */
8134                         int func = BP_FUNC(bp);
8135                         u32 swap_en;
8136                         u32 swap_val;
8137
8138                         /* clear the UNDI indication */
8139                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8140
8141                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8142
8143                         /* try unload UNDI on port 0 */
8144                         bp->func = 0;
8145                         bp->fw_seq =
8146                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8147                                 DRV_MSG_SEQ_NUMBER_MASK);
8148                         reset_code = bnx2x_fw_command(bp, reset_code);
8149
8150                         /* if UNDI is loaded on the other port */
8151                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8152
8153                                 /* send "DONE" for previous unload */
8154                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8155
8156                                 /* unload UNDI on port 1 */
8157                                 bp->func = 1;
8158                                 bp->fw_seq =
8159                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8160                                         DRV_MSG_SEQ_NUMBER_MASK);
8161                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8162
8163                                 bnx2x_fw_command(bp, reset_code);
8164                         }
8165
8166                         /* now it's safe to release the lock */
8167                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8168
8169                         bnx2x_undi_int_disable(bp, func);
8170
8171                         /* close input traffic and wait for it */
8172                         /* Do not rcv packets to BRB */
8173                         REG_WR(bp,
8174                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8175                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8176                         /* Do not direct rcv packets that are not for MCP to
8177                          * the BRB */
8178                         REG_WR(bp,
8179                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8180                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8181                         /* clear AEU */
8182                         REG_WR(bp,
8183                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8184                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8185                         msleep(10);
8186
8187                         /* save NIG port swap info */
8188                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8189                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8190                         /* reset device */
8191                         REG_WR(bp,
8192                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8193                                0xd3ffffff);
8194                         REG_WR(bp,
8195                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8196                                0x1403);
8197                         /* take the NIG out of reset and restore swap values */
8198                         REG_WR(bp,
8199                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8200                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8201                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8202                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8203
8204                         /* send unload done to the MCP */
8205                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8206
8207                         /* restore our func and fw_seq */
8208                         bp->func = func;
8209                         bp->fw_seq =
8210                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8211                                 DRV_MSG_SEQ_NUMBER_MASK);
8212
8213                 } else
8214                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8215         }
8216 }
8217
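/* Read the chip-wide (port-independent) configuration: chip ID, flash
 * size, shmem bases, MCP validity, bootcode version and WoL capability.
 * The chip ID is assembled as num:16-31, rev:12-15, metal:4-11,
 * bond_id:0-3; e.g. a chip num of 0x164e with rev/metal/bond all zero
 * yields chip_id 0x164e0000.
 */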
8218 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8219 {
8220         u32 val, val2, val3, val4, id;
8221         u16 pmc;
8222
8223         /* Get the chip revision id and number. */
8224         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8225         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8226         id = ((val & 0xffff) << 16);
8227         val = REG_RD(bp, MISC_REG_CHIP_REV);
8228         id |= ((val & 0xf) << 12);
8229         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8230         id |= ((val & 0xff) << 4);
8231         val = REG_RD(bp, MISC_REG_BOND_ID);
8232         id |= (val & 0xf);
8233         bp->common.chip_id = id;
8234         bp->link_params.chip_id = bp->common.chip_id;
8235         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8236
8237         val = (REG_RD(bp, 0x2874) & 0x55);
8238         if ((bp->common.chip_id & 0x1) ||
8239             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8240                 bp->flags |= ONE_PORT_FLAG;
8241                 BNX2X_DEV_INFO("single port device\n");
8242         }
8243
8244         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8245         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8246                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8247         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8248                        bp->common.flash_size, bp->common.flash_size);
8249
8250         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8251         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8252         bp->link_params.shmem_base = bp->common.shmem_base;
8253         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8254                        bp->common.shmem_base, bp->common.shmem2_base);
8255
8256         if (!bp->common.shmem_base ||
8257             (bp->common.shmem_base < 0xA0000) ||
8258             (bp->common.shmem_base >= 0xC0000)) {
8259                 BNX2X_DEV_INFO("MCP not active\n");
8260                 bp->flags |= NO_MCP_FLAG;
8261                 return;
8262         }
8263
8264         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8265         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8266                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267                 BNX2X_ERR("BAD MCP validity signature\n");
8268
8269         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8270         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8271
8272         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8273                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8274                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8275
8276         bp->link_params.feature_config_flags = 0;
8277         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8278         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8279                 bp->link_params.feature_config_flags |=
8280                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8281         else
8282                 bp->link_params.feature_config_flags &=
8283                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8284
8285         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8286         bp->common.bc_ver = val;
8287         BNX2X_DEV_INFO("bc_ver %X\n", val);
8288         if (val < BNX2X_BC_VER) {
8289                 /* for now only warn;
8290                  * later we might need to enforce this */
8291                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8292                           " please upgrade BC\n", BNX2X_BC_VER, val);
8293         }
8294         bp->link_params.feature_config_flags |=
8295                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8296                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8297
8298         if (BP_E1HVN(bp) == 0) {
8299                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8300                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8301         } else {
8302                 /* no WOL capability for E1HVN != 0 */
8303                 bp->flags |= NO_WOL_FLAG;
8304         }
8305         BNX2X_DEV_INFO("%sWoL capable\n",
8306                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8307
8308         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8309         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8310         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8311         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8312
8313         pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
8314 }
8315
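/* Build bp->port.supported from the SerDes/XGXS external PHY type read
 * out of NVRAM, then mask the result with speed_cap_mask so that only
 * the speeds enabled in the HW config remain. Also picks up the PHY
 * address from the NIG.
 */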
8316 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8317                                                     u32 switch_cfg)
8318 {
8319         int port = BP_PORT(bp);
8320         u32 ext_phy_type;
8321
8322         switch (switch_cfg) {
8323         case SWITCH_CFG_1G:
8324                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8325
8326                 ext_phy_type =
8327                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8328                 switch (ext_phy_type) {
8329                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8330                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8331                                        ext_phy_type);
8332
8333                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8334                                                SUPPORTED_10baseT_Full |
8335                                                SUPPORTED_100baseT_Half |
8336                                                SUPPORTED_100baseT_Full |
8337                                                SUPPORTED_1000baseT_Full |
8338                                                SUPPORTED_2500baseX_Full |
8339                                                SUPPORTED_TP |
8340                                                SUPPORTED_FIBRE |
8341                                                SUPPORTED_Autoneg |
8342                                                SUPPORTED_Pause |
8343                                                SUPPORTED_Asym_Pause);
8344                         break;
8345
8346                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8347                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8348                                        ext_phy_type);
8349
8350                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8351                                                SUPPORTED_10baseT_Full |
8352                                                SUPPORTED_100baseT_Half |
8353                                                SUPPORTED_100baseT_Full |
8354                                                SUPPORTED_1000baseT_Full |
8355                                                SUPPORTED_TP |
8356                                                SUPPORTED_FIBRE |
8357                                                SUPPORTED_Autoneg |
8358                                                SUPPORTED_Pause |
8359                                                SUPPORTED_Asym_Pause);
8360                         break;
8361
8362                 default:
8363                         BNX2X_ERR("NVRAM config error. "
8364                                   "BAD SerDes ext_phy_config 0x%x\n",
8365                                   bp->link_params.ext_phy_config);
8366                         return;
8367                 }
8368
8369                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8370                                            port*0x10);
8371                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8372                 break;
8373
8374         case SWITCH_CFG_10G:
8375                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8376
8377                 ext_phy_type =
8378                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8379                 switch (ext_phy_type) {
8380                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8381                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8382                                        ext_phy_type);
8383
8384                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8385                                                SUPPORTED_10baseT_Full |
8386                                                SUPPORTED_100baseT_Half |
8387                                                SUPPORTED_100baseT_Full |
8388                                                SUPPORTED_1000baseT_Full |
8389                                                SUPPORTED_2500baseX_Full |
8390                                                SUPPORTED_10000baseT_Full |
8391                                                SUPPORTED_TP |
8392                                                SUPPORTED_FIBRE |
8393                                                SUPPORTED_Autoneg |
8394                                                SUPPORTED_Pause |
8395                                                SUPPORTED_Asym_Pause);
8396                         break;
8397
8398                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8399                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8400                                        ext_phy_type);
8401
8402                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8403                                                SUPPORTED_1000baseT_Full |
8404                                                SUPPORTED_FIBRE |
8405                                                SUPPORTED_Autoneg |
8406                                                SUPPORTED_Pause |
8407                                                SUPPORTED_Asym_Pause);
8408                         break;
8409
8410                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8411                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8412                                        ext_phy_type);
8413
8414                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8415                                                SUPPORTED_2500baseX_Full |
8416                                                SUPPORTED_1000baseT_Full |
8417                                                SUPPORTED_FIBRE |
8418                                                SUPPORTED_Autoneg |
8419                                                SUPPORTED_Pause |
8420                                                SUPPORTED_Asym_Pause);
8421                         break;
8422
8423                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8424                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8425                                        ext_phy_type);
8426
8427                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8428                                                SUPPORTED_FIBRE |
8429                                                SUPPORTED_Pause |
8430                                                SUPPORTED_Asym_Pause);
8431                         break;
8432
8433                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8434                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8435                                        ext_phy_type);
8436
8437                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8438                                                SUPPORTED_1000baseT_Full |
8439                                                SUPPORTED_FIBRE |
8440                                                SUPPORTED_Pause |
8441                                                SUPPORTED_Asym_Pause);
8442                         break;
8443
8444                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8445                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8446                                        ext_phy_type);
8447
8448                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449                                                SUPPORTED_1000baseT_Full |
8450                                                SUPPORTED_Autoneg |
8451                                                SUPPORTED_FIBRE |
8452                                                SUPPORTED_Pause |
8453                                                SUPPORTED_Asym_Pause);
8454                         break;
8455
8456                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8457                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8458                                        ext_phy_type);
8459
8460                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8461                                                SUPPORTED_1000baseT_Full |
8462                                                SUPPORTED_Autoneg |
8463                                                SUPPORTED_FIBRE |
8464                                                SUPPORTED_Pause |
8465                                                SUPPORTED_Asym_Pause);
8466                         break;
8467
8468                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8469                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8470                                        ext_phy_type);
8471
8472                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8473                                                SUPPORTED_TP |
8474                                                SUPPORTED_Autoneg |
8475                                                SUPPORTED_Pause |
8476                                                SUPPORTED_Asym_Pause);
8477                         break;
8478
8479                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8480                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8481                                        ext_phy_type);
8482
8483                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8484                                                SUPPORTED_10baseT_Full |
8485                                                SUPPORTED_100baseT_Half |
8486                                                SUPPORTED_100baseT_Full |
8487                                                SUPPORTED_1000baseT_Full |
8488                                                SUPPORTED_10000baseT_Full |
8489                                                SUPPORTED_TP |
8490                                                SUPPORTED_Autoneg |
8491                                                SUPPORTED_Pause |
8492                                                SUPPORTED_Asym_Pause);
8493                         break;
8494
8495                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8496                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8497                                   bp->link_params.ext_phy_config);
8498                         break;
8499
8500                 default:
8501                         BNX2X_ERR("NVRAM config error. "
8502                                   "BAD XGXS ext_phy_config 0x%x\n",
8503                                   bp->link_params.ext_phy_config);
8504                         return;
8505                 }
8506
8507                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8508                                            port*0x18);
8509                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8510
8511                 break;
8512
8513         default:
8514                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8515                           bp->port.link_config);
8516                 return;
8517         }
8518         bp->link_params.phy_addr = bp->port.phy_addr;
8519
8520         /* mask what we support according to speed_cap_mask */
8521         if (!(bp->link_params.speed_cap_mask &
8522                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8523                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8524
8525         if (!(bp->link_params.speed_cap_mask &
8526                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8527                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8528
8529         if (!(bp->link_params.speed_cap_mask &
8530                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8531                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8532
8533         if (!(bp->link_params.speed_cap_mask &
8534                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8535                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8536
8537         if (!(bp->link_params.speed_cap_mask &
8538                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8539                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8540                                         SUPPORTED_1000baseT_Full);
8541
8542         if (!(bp->link_params.speed_cap_mask &
8543                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8544                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8545
8546         if (!(bp->link_params.speed_cap_mask &
8547                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8548                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8549
8550         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8551 }
8552
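/* Translate the NVRAM link_config (requested speed and duplex) into
 * link_params and the advertised mode bitmap, and derive the requested
 * flow control. A bad NVRAM value falls back to autoneg with the full
 * supported mask advertised.
 */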
8553 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8554 {
8555         bp->link_params.req_duplex = DUPLEX_FULL;
8556
8557         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8558         case PORT_FEATURE_LINK_SPEED_AUTO:
8559                 if (bp->port.supported & SUPPORTED_Autoneg) {
8560                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8561                         bp->port.advertising = bp->port.supported;
8562                 } else {
8563                         u32 ext_phy_type =
8564                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8565
8566                         if ((ext_phy_type ==
8567                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8568                             (ext_phy_type ==
8569                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8570                                 /* force 10G, no AN */
8571                                 bp->link_params.req_line_speed = SPEED_10000;
8572                                 bp->port.advertising =
8573                                                 (ADVERTISED_10000baseT_Full |
8574                                                  ADVERTISED_FIBRE);
8575                                 break;
8576                         }
8577                         BNX2X_ERR("NVRAM config error. "
8578                                   "Invalid link_config 0x%x"
8579                                   "  Autoneg not supported\n",
8580                                   bp->port.link_config);
8581                         return;
8582                 }
8583                 break;
8584
8585         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8586                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8587                         bp->link_params.req_line_speed = SPEED_10;
8588                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8589                                                 ADVERTISED_TP);
8590                 } else {
8591                         BNX2X_ERR("NVRAM config error. "
8592                                   "Invalid link_config 0x%x"
8593                                   "  speed_cap_mask 0x%x\n",
8594                                   bp->port.link_config,
8595                                   bp->link_params.speed_cap_mask);
8596                         return;
8597                 }
8598                 break;
8599
8600         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8601                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8602                         bp->link_params.req_line_speed = SPEED_10;
8603                         bp->link_params.req_duplex = DUPLEX_HALF;
8604                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8605                                                 ADVERTISED_TP);
8606                 } else {
8607                         BNX2X_ERR("NVRAM config error. "
8608                                   "Invalid link_config 0x%x"
8609                                   "  speed_cap_mask 0x%x\n",
8610                                   bp->port.link_config,
8611                                   bp->link_params.speed_cap_mask);
8612                         return;
8613                 }
8614                 break;
8615
8616         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8617                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8618                         bp->link_params.req_line_speed = SPEED_100;
8619                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8620                                                 ADVERTISED_TP);
8621                 } else {
8622                         BNX2X_ERR("NVRAM config error. "
8623                                   "Invalid link_config 0x%x"
8624                                   "  speed_cap_mask 0x%x\n",
8625                                   bp->port.link_config,
8626                                   bp->link_params.speed_cap_mask);
8627                         return;
8628                 }
8629                 break;
8630
8631         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8632                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8633                         bp->link_params.req_line_speed = SPEED_100;
8634                         bp->link_params.req_duplex = DUPLEX_HALF;
8635                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8636                                                 ADVERTISED_TP);
8637                 } else {
8638                         BNX2X_ERR("NVRAM config error. "
8639                                   "Invalid link_config 0x%x"
8640                                   "  speed_cap_mask 0x%x\n",
8641                                   bp->port.link_config,
8642                                   bp->link_params.speed_cap_mask);
8643                         return;
8644                 }
8645                 break;
8646
8647         case PORT_FEATURE_LINK_SPEED_1G:
8648                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8649                         bp->link_params.req_line_speed = SPEED_1000;
8650                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8651                                                 ADVERTISED_TP);
8652                 } else {
8653                         BNX2X_ERR("NVRAM config error. "
8654                                   "Invalid link_config 0x%x"
8655                                   "  speed_cap_mask 0x%x\n",
8656                                   bp->port.link_config,
8657                                   bp->link_params.speed_cap_mask);
8658                         return;
8659                 }
8660                 break;
8661
8662         case PORT_FEATURE_LINK_SPEED_2_5G:
8663                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8664                         bp->link_params.req_line_speed = SPEED_2500;
8665                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8666                                                 ADVERTISED_TP);
8667                 } else {
8668                         BNX2X_ERR("NVRAM config error. "
8669                                   "Invalid link_config 0x%x"
8670                                   "  speed_cap_mask 0x%x\n",
8671                                   bp->port.link_config,
8672                                   bp->link_params.speed_cap_mask);
8673                         return;
8674                 }
8675                 break;
8676
8677         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8678         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8679         case PORT_FEATURE_LINK_SPEED_10G_KR:
8680                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8681                         bp->link_params.req_line_speed = SPEED_10000;
8682                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8683                                                 ADVERTISED_FIBRE);
8684                 } else {
8685                         BNX2X_ERR("NVRAM config error. "
8686                                   "Invalid link_config 0x%x"
8687                                   "  speed_cap_mask 0x%x\n",
8688                                   bp->port.link_config,
8689                                   bp->link_params.speed_cap_mask);
8690                         return;
8691                 }
8692                 break;
8693
8694         default:
8695                 BNX2X_ERR("NVRAM config error. "
8696                           "BAD link speed link_config 0x%x\n",
8697                           bp->port.link_config);
8698                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8699                 bp->port.advertising = bp->port.supported;
8700                 break;
8701         }
8702
8703         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8704                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8705         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8706             !(bp->port.supported & SUPPORTED_Autoneg))
8707                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8708
8709         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8710                        "  advertising 0x%x\n",
8711                        bp->link_params.req_line_speed,
8712                        bp->link_params.req_duplex,
8713                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8714 }
8715
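/* Compose a 6-byte MAC address from the two shmem words: the upper 16
 * bits and lower 32 bits are converted to big-endian and laid out back
 * to back, e.g. mac_hi 0x0010 with mac_lo 0x18000000 gives
 * 00:10:18:00:00:00.
 */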
8716 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8717 {
8718         mac_hi = cpu_to_be16(mac_hi);
8719         mac_lo = cpu_to_be32(mac_lo);
8720         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8721         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8722 }
8723
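/* Read the per-port HW configuration from shmem: lane and external PHY
 * config, speed capability mask, link_config, XGXS rx/tx equalization
 * values, the WoL default and the port MAC address (plus the iSCSI MAC
 * when CNIC is built in).
 */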
8724 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8725 {
8726         int port = BP_PORT(bp);
8727         u32 val, val2;
8728         u32 config;
8729         u16 i;
8730         u32 ext_phy_type;
8731
8732         bp->link_params.bp = bp;
8733         bp->link_params.port = port;
8734
8735         bp->link_params.lane_config =
8736                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8737         bp->link_params.ext_phy_config =
8738                 SHMEM_RD(bp,
8739                          dev_info.port_hw_config[port].external_phy_config);
8740         /* BCM8727_NOC => BCM8727 without over-current support */
8741         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8742             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8743                 bp->link_params.ext_phy_config &=
8744                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8745                 bp->link_params.ext_phy_config |=
8746                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8747                 bp->link_params.feature_config_flags |=
8748                         FEATURE_CONFIG_BCM8727_NOC;
8749         }
8750
8751         bp->link_params.speed_cap_mask =
8752                 SHMEM_RD(bp,
8753                          dev_info.port_hw_config[port].speed_capability_mask);
8754
8755         bp->port.link_config =
8756                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8757
8758         /* Get the XGXS rx and tx config for all 4 lanes */
8759         for (i = 0; i < 2; i++) {
8760                 val = SHMEM_RD(bp,
8761                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8762                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8763                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8764
8765                 val = SHMEM_RD(bp,
8766                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8767                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8768                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8769         }
8770
8771         /* If the device is capable of WoL, set the default state according
8772          * to the HW
8773          */
8774         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8775         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8776                    (config & PORT_FEATURE_WOL_ENABLED));
8777
8778         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8779                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8780                        bp->link_params.lane_config,
8781                        bp->link_params.ext_phy_config,
8782                        bp->link_params.speed_cap_mask, bp->port.link_config);
8783
8784         bp->link_params.switch_cfg |= (bp->port.link_config &
8785                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8786         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8787
8788         bnx2x_link_settings_requested(bp);
8789
8790         /*
8791          * If connected directly, work with the internal PHY, otherwise, work
8792          * with the external PHY
8793          */
8794         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8795         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8796                 bp->mdio.prtad = bp->link_params.phy_addr;
8797
8798         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8799                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8800                 bp->mdio.prtad =
8801                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8802
8803         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8804         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8805         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8806         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8807         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8808
8809 #ifdef BCM_CNIC
8810         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8811         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8812         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8813 #endif
8814 }
8815
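/* Top-level HW probe at __devinit time: common info first, then E1H
 * multi-function (E1HOV) validation, per-port info and the firmware
 * mailbox sequence number. In E1H MF mode the per-function MAC from
 * mf_cfg overrides the port MAC.
 */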
8816 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8817 {
8818         int func = BP_FUNC(bp);
8819         u32 val, val2;
8820         int rc = 0;
8821
8822         bnx2x_get_common_hwinfo(bp);
8823
8824         bp->e1hov = 0;
8825         bp->e1hmf = 0;
8826         if (CHIP_IS_E1H(bp)) {
8827                 bp->mf_config =
8828                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8829
8830                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8831                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8832                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8833                         bp->e1hmf = 1;
8834                 BNX2X_DEV_INFO("%s function mode\n",
8835                                IS_E1HMF(bp) ? "multi" : "single");
8836
8837                 if (IS_E1HMF(bp)) {
8838                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8839                                                                 e1hov_tag) &
8840                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8841                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8842                                 bp->e1hov = val;
8843                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8844                                                "(0x%04x)\n",
8845                                                func, bp->e1hov, bp->e1hov);
8846                         } else {
8847                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8848                                           "  aborting\n", func);
8849                                 rc = -EPERM;
8850                         }
8851                 } else {
8852                         if (BP_E1HVN(bp)) {
8853                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8854                                           "  aborting\n", BP_E1HVN(bp));
8855                                 rc = -EPERM;
8856                         }
8857                 }
8858         }
8859
8860         if (!BP_NOMCP(bp)) {
8861                 bnx2x_get_port_hwinfo(bp);
8862
8863                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8864                               DRV_MSG_SEQ_NUMBER_MASK);
8865                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8866         }
8867
8868         if (IS_E1HMF(bp)) {
8869                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8870                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8871                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8872                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8873                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8874                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8875                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8876                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8877                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8878                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8879                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8880                                ETH_ALEN);
8881                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8882                                ETH_ALEN);
8883                 }
8884
8885                 return rc;
8886         }
8887
8888         if (BP_NOMCP(bp)) {
8889                 /* only supposed to happen on emulation/FPGA */
8890                 BNX2X_ERR("warning: random MAC workaround active\n");
8891                 random_ether_addr(bp->dev->dev_addr);
8892                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8893         }
8894
8895         return rc;
8896 }
8897
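/* One-time driver state init at probe: locks, work items, HW info,
 * UNDI takeover, multi-queue/TPA/coalescing defaults and the periodic
 * timer. Interrupt handling stays disabled (intr_sem = 1) until the HW
 * is initialized.
 */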
8898 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8899 {
8900         int func = BP_FUNC(bp);
8901         int timer_interval;
8902         int rc;
8903
8904         /* Disable interrupt handling until HW is initialized */
8905         atomic_set(&bp->intr_sem, 1);
8906         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8907
8908         mutex_init(&bp->port.phy_mutex);
8909         mutex_init(&bp->fw_mb_mutex);
8910 #ifdef BCM_CNIC
8911         mutex_init(&bp->cnic_mutex);
8912 #endif
8913
8914         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8915         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8916
8917         rc = bnx2x_get_hwinfo(bp);
8918
8919         /* need to reset chip if UNDI was active */
8920         if (!BP_NOMCP(bp))
8921                 bnx2x_undi_unload(bp);
8922
8923         if (CHIP_REV_IS_FPGA(bp))
8924                 pr_err("FPGA detected\n");
8925
8926         if (BP_NOMCP(bp) && (func == 0))
8927                 pr_err("MCP disabled, must load devices in order!\n");
8928
8929         /* Set multi queue mode */
8930         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8931             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8932                 pr_err("Multi disabled since int_mode requested is not MSI-X\n");
8933                 multi_mode = ETH_RSS_MODE_DISABLED;
8934         }
8935         bp->multi_mode = multi_mode;
8936
8938         /* Set TPA flags */
8939         if (disable_tpa) {
8940                 bp->flags &= ~TPA_ENABLE_FLAG;
8941                 bp->dev->features &= ~NETIF_F_LRO;
8942         } else {
8943                 bp->flags |= TPA_ENABLE_FLAG;
8944                 bp->dev->features |= NETIF_F_LRO;
8945         }
8946
8947         if (CHIP_IS_E1(bp))
8948                 bp->dropless_fc = 0;
8949         else
8950                 bp->dropless_fc = dropless_fc;
8951
8952         bp->mrrs = mrrs;
8953
8954         bp->tx_ring_size = MAX_TX_AVAIL;
8955         bp->rx_ring_size = MAX_RX_AVAIL;
8956
8957         bp->rx_csum = 1;
8958
8959         /* make sure that the numbers are in the right granularity */
8960         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8961         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8962
8963         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8964         bp->current_interval = (poll ? poll : timer_interval);
8965
8966         init_timer(&bp->timer);
8967         bp->timer.expires = jiffies + bp->current_interval;
8968         bp->timer.data = (unsigned long) bp;
8969         bp->timer.function = bnx2x_timer;
8970
8971         return rc;
8972 }
8973
8974 /*
8975  * ethtool service functions
8976  */
8977
8978 /* All ethtool functions called with rtnl_lock */
8979
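/* Report link state to ethtool; in E1H multi-function mode the speed
 * shown is clamped to this function's max BW share from mf_config
 * (MAX_BW is in units of 100 Mbps). The port type is derived from the
 * external PHY type.
 */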
8980 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8981 {
8982         struct bnx2x *bp = netdev_priv(dev);
8983
8984         cmd->supported = bp->port.supported;
8985         cmd->advertising = bp->port.advertising;
8986
8987         if ((bp->state == BNX2X_STATE_OPEN) &&
8988             !(bp->flags & MF_FUNC_DIS) &&
8989             (bp->link_vars.link_up)) {
8990                 cmd->speed = bp->link_vars.line_speed;
8991                 cmd->duplex = bp->link_vars.duplex;
8992                 if (IS_E1HMF(bp)) {
8993                         u16 vn_max_rate;
8994
8995                         vn_max_rate =
8996                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8997                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8998                         if (vn_max_rate < cmd->speed)
8999                                 cmd->speed = vn_max_rate;
9000                 }
9001         } else {
9002                 cmd->speed = -1;
9003                 cmd->duplex = -1;
9004         }
9005
9006         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9007                 u32 ext_phy_type =
9008                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9009
9010                 switch (ext_phy_type) {
9011                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9013                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9014                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9015                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9016                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9017                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9018                         cmd->port = PORT_FIBRE;
9019                         break;
9020
9021                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9022                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9023                         cmd->port = PORT_TP;
9024                         break;
9025
9026                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9027                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9028                                   bp->link_params.ext_phy_config);
9029                         break;
9030
9031                 default:
9032                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9033                            bp->link_params.ext_phy_config);
9034                         break;
9035                 }
9036         } else
9037                 cmd->port = PORT_TP;
9038
9039         cmd->phy_address = bp->mdio.prtad;
9040         cmd->transceiver = XCVR_INTERNAL;
9041
9042         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9043                 cmd->autoneg = AUTONEG_ENABLE;
9044         else
9045                 cmd->autoneg = AUTONEG_DISABLE;
9046
9047         cmd->maxtxpkt = 0;
9048         cmd->maxrxpkt = 0;
9049
9050         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9051            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9052            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9053            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9054            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9055            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9056            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9057
9058         return 0;
9059 }
9060
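/* Apply ethtool speed/duplex settings: validate the request against
 * bp->port.supported, set req_line_speed/req_duplex/advertising and
 * redo the link if the interface is up. A no-op in E1H multi-function
 * mode.
 */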
9061 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9062 {
9063         struct bnx2x *bp = netdev_priv(dev);
9064         u32 advertising;
9065
9066         if (IS_E1HMF(bp))
9067                 return 0;
9068
9069         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9070            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9071            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9072            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9073            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9074            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9075            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9076
9077         if (cmd->autoneg == AUTONEG_ENABLE) {
9078                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9079                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9080                         return -EINVAL;
9081                 }
9082
9083                 /* advertise the requested speed and duplex if supported */
9084                 cmd->advertising &= bp->port.supported;
9085
9086                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9087                 bp->link_params.req_duplex = DUPLEX_FULL;
9088                 bp->port.advertising |= (ADVERTISED_Autoneg |
9089                                          cmd->advertising);
9090
9091         } else { /* forced speed */
9092                 /* advertise the requested speed and duplex if supported */
9093                 switch (cmd->speed) {
9094                 case SPEED_10:
9095                         if (cmd->duplex == DUPLEX_FULL) {
9096                                 if (!(bp->port.supported &
9097                                       SUPPORTED_10baseT_Full)) {
9098                                         DP(NETIF_MSG_LINK,
9099                                            "10M full not supported\n");
9100                                         return -EINVAL;
9101                                 }
9102
9103                                 advertising = (ADVERTISED_10baseT_Full |
9104                                                ADVERTISED_TP);
9105                         } else {
9106                                 if (!(bp->port.supported &
9107                                       SUPPORTED_10baseT_Half)) {
9108                                         DP(NETIF_MSG_LINK,
9109                                            "10M half not supported\n");
9110                                         return -EINVAL;
9111                                 }
9112
9113                                 advertising = (ADVERTISED_10baseT_Half |
9114                                                ADVERTISED_TP);
9115                         }
9116                         break;
9117
9118                 case SPEED_100:
9119                         if (cmd->duplex == DUPLEX_FULL) {
9120                                 if (!(bp->port.supported &
9121                                                 SUPPORTED_100baseT_Full)) {
9122                                         DP(NETIF_MSG_LINK,
9123                                            "100M full not supported\n");
9124                                         return -EINVAL;
9125                                 }
9126
9127                                 advertising = (ADVERTISED_100baseT_Full |
9128                                                ADVERTISED_TP);
9129                         } else {
9130                                 if (!(bp->port.supported &
9131                                                 SUPPORTED_100baseT_Half)) {
9132                                         DP(NETIF_MSG_LINK,
9133                                            "100M half not supported\n");
9134                                         return -EINVAL;
9135                                 }
9136
9137                                 advertising = (ADVERTISED_100baseT_Half |
9138                                                ADVERTISED_TP);
9139                         }
9140                         break;
9141
9142                 case SPEED_1000:
9143                         if (cmd->duplex != DUPLEX_FULL) {
9144                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9145                                 return -EINVAL;
9146                         }
9147
9148                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9149                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9150                                 return -EINVAL;
9151                         }
9152
9153                         advertising = (ADVERTISED_1000baseT_Full |
9154                                        ADVERTISED_TP);
9155                         break;
9156
9157                 case SPEED_2500:
9158                         if (cmd->duplex != DUPLEX_FULL) {
9159                                 DP(NETIF_MSG_LINK,
9160                                    "2.5G half not supported\n");
9161                                 return -EINVAL;
9162                         }
9163
9164                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9165                                 DP(NETIF_MSG_LINK,
9166                                    "2.5G full not supported\n");
9167                                 return -EINVAL;
9168                         }
9169
9170                         advertising = (ADVERTISED_2500baseX_Full |
9171                                        ADVERTISED_TP);
9172                         break;
9173
9174                 case SPEED_10000:
9175                         if (cmd->duplex != DUPLEX_FULL) {
9176                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9177                                 return -EINVAL;
9178                         }
9179
9180                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9181                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9182                                 return -EINVAL;
9183                         }
9184
9185                         advertising = (ADVERTISED_10000baseT_Full |
9186                                        ADVERTISED_FIBRE);
9187                         break;
9188
9189                 default:
9190                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9191                         return -EINVAL;
9192                 }
9193
9194                 bp->link_params.req_line_speed = cmd->speed;
9195                 bp->link_params.req_duplex = cmd->duplex;
9196                 bp->port.advertising = advertising;
9197         }
9198
9199         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9200            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9201            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9202            bp->port.advertising);
9203
9204         if (netif_running(dev)) {
9205                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9206                 bnx2x_link_set(bp);
9207         }
9208
9209         return 0;
9210 }
9211
9212 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9213 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9214
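/* The dump length is the sum of all register blocks that are online
 * for this chip rev; wide-bus (wreg) blocks contribute one address
 * word plus read_regs_count data words per register. Totals are in
 * dwords, hence the multiply by 4, plus the dump header.
 */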
9215 static int bnx2x_get_regs_len(struct net_device *dev)
9216 {
9217         struct bnx2x *bp = netdev_priv(dev);
9218         int regdump_len = 0;
9219         int i;
9220
9221         if (CHIP_IS_E1(bp)) {
9222                 for (i = 0; i < REGS_COUNT; i++)
9223                         if (IS_E1_ONLINE(reg_addrs[i].info))
9224                                 regdump_len += reg_addrs[i].size;
9225
9226                 for (i = 0; i < WREGS_COUNT_E1; i++)
9227                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9228                                 regdump_len += wreg_addrs_e1[i].size *
9229                                         (1 + wreg_addrs_e1[i].read_regs_count);
9230
9231         } else { /* E1H */
9232                 for (i = 0; i < REGS_COUNT; i++)
9233                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9234                                 regdump_len += reg_addrs[i].size;
9235
9236                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9237                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9238                                 regdump_len += wreg_addrs_e1h[i].size *
9239                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9240         }
9241         regdump_len *= 4;
9242         regdump_len += sizeof(struct dump_hdr);
9243
9244         return regdump_len;
9245 }
9246
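/* Fill the ethtool register dump: a dump_hdr (including the four
 * STORM waitp registers) followed by the raw contents of every
 * register block that is online for this chip rev.
 */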
9247 static void bnx2x_get_regs(struct net_device *dev,
9248                            struct ethtool_regs *regs, void *_p)
9249 {
9250         u32 *p = _p, i, j;
9251         struct bnx2x *bp = netdev_priv(dev);
9252         struct dump_hdr dump_hdr = {0};
9253
9254         regs->version = 0;
9255         memset(p, 0, regs->len);
9256
9257         if (!netif_running(bp->dev))
9258                 return;
9259
9260         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9261         dump_hdr.dump_sign = dump_sign_all;
9262         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9263         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9264         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9265         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9266         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9267
9268         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9269         p += dump_hdr.hdr_size + 1;
9270
9271         if (CHIP_IS_E1(bp)) {
9272                 for (i = 0; i < REGS_COUNT; i++)
9273                         if (IS_E1_ONLINE(reg_addrs[i].info))
9274                                 for (j = 0; j < reg_addrs[i].size; j++)
9275                                         *p++ = REG_RD(bp,
9276                                                       reg_addrs[i].addr + j*4);
9277
9278         } else { /* E1H */
9279                 for (i = 0; i < REGS_COUNT; i++)
9280                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9281                                 for (j = 0; j < reg_addrs[i].size; j++)
9282                                         *p++ = REG_RD(bp,
9283                                                       reg_addrs[i].addr + j*4);
9284         }
9285 }
9286
9287 #define PHY_FW_VER_LEN                  10
9288
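/* ethtool drvinfo: driver name/version plus a firmware string built
 * from the bootcode version bytes and, when this function is the PMF,
 * the external PHY firmware version.
 */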
9289 static void bnx2x_get_drvinfo(struct net_device *dev,
9290                               struct ethtool_drvinfo *info)
9291 {
9292         struct bnx2x *bp = netdev_priv(dev);
9293         u8 phy_fw_ver[PHY_FW_VER_LEN];
9294
9295         strcpy(info->driver, DRV_MODULE_NAME);
9296         strcpy(info->version, DRV_MODULE_VERSION);
9297
9298         phy_fw_ver[0] = '\0';
9299         if (bp->port.pmf) {
9300                 bnx2x_acquire_phy_lock(bp);
9301                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9302                                              (bp->state != BNX2X_STATE_CLOSED),
9303                                              phy_fw_ver, PHY_FW_VER_LEN);
9304                 bnx2x_release_phy_lock(bp);
9305         }
9306
9307         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9308                  (bp->common.bc_ver & 0xff0000) >> 16,
9309                  (bp->common.bc_ver & 0xff00) >> 8,
9310                  (bp->common.bc_ver & 0xff),
9311                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9312         strcpy(info->bus_info, pci_name(bp->pdev));
9313         info->n_stats = BNX2X_NUM_STATS;
9314         info->testinfo_len = BNX2X_NUM_TESTS;
9315         info->eedump_len = bp->common.flash_size;
9316         info->regdump_len = bnx2x_get_regs_len(dev);
9317 }
9318
9319 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9320 {
9321         struct bnx2x *bp = netdev_priv(dev);
9322
9323         if (bp->flags & NO_WOL_FLAG) {
9324                 wol->supported = 0;
9325                 wol->wolopts = 0;
9326         } else {
9327                 wol->supported = WAKE_MAGIC;
9328                 if (bp->wol)
9329                         wol->wolopts = WAKE_MAGIC;
9330                 else
9331                         wol->wolopts = 0;
9332         }
9333         memset(&wol->sopass, 0, sizeof(wol->sopass));
9334 }
9335
9336 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9337 {
9338         struct bnx2x *bp = netdev_priv(dev);
9339
9340         if (wol->wolopts & ~WAKE_MAGIC)
9341                 return -EINVAL;
9342
9343         if (wol->wolopts & WAKE_MAGIC) {
9344                 if (bp->flags & NO_WOL_FLAG)
9345                         return -EINVAL;
9346
9347                 bp->wol = 1;
9348         } else
9349                 bp->wol = 0;
9350
9351         return 0;
9352 }
9353
9354 static u32 bnx2x_get_msglevel(struct net_device *dev)
9355 {
9356         struct bnx2x *bp = netdev_priv(dev);
9357
9358         return bp->msg_enable;
9359 }
9360
9361 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9362 {
9363         struct bnx2x *bp = netdev_priv(dev);
9364
9365         if (capable(CAP_NET_ADMIN))
9366                 bp->msg_enable = level;
9367 }
9368
9369 static int bnx2x_nway_reset(struct net_device *dev)
9370 {
9371         struct bnx2x *bp = netdev_priv(dev);
9372
9373         if (!bp->port.pmf)
9374                 return 0;
9375
9376         if (netif_running(dev)) {
9377                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9378                 bnx2x_link_set(bp);
9379         }
9380
9381         return 0;
9382 }
9383
9384 static u32 bnx2x_get_link(struct net_device *dev)
9385 {
9386         struct bnx2x *bp = netdev_priv(dev);
9387
9388         if (bp->flags & MF_FUNC_DIS)
9389                 return 0;
9390
9391         return bp->link_vars.link_up;
9392 }
9393
9394 static int bnx2x_get_eeprom_len(struct net_device *dev)
9395 {
9396         struct bnx2x *bp = netdev_priv(dev);
9397
9398         return bp->common.flash_size;
9399 }
9400
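/* NVRAM access is arbitrated per port through MCPR_NVM_SW_ARB: set the
 * request bit and poll until the matching grant bit appears (up to
 * NVRAM_TIMEOUT_COUNT * 10 polls of 5us, stretched 100x on
 * emulation/FPGA). Released via the matching clear bit below.
 */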
9401 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9402 {
9403         int port = BP_PORT(bp);
9404         int count, i;
9405         u32 val = 0;
9406
9407         /* adjust timeout for emulation/FPGA */
9408         count = NVRAM_TIMEOUT_COUNT;
9409         if (CHIP_REV_IS_SLOW(bp))
9410                 count *= 100;
9411
9412         /* request access to nvram interface */
9413         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9414                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9415
9416         for (i = 0; i < count*10; i++) {
9417                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9418                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9419                         break;
9420
9421                 udelay(5);
9422         }
9423
9424         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9425                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9426                 return -EBUSY;
9427         }
9428
9429         return 0;
9430 }
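/* bnx2x_acquire_nvram_lock() and its counterpart below implement a simple
 * request/grant handshake with the per-port NVRAM software arbiter: set
 * (or clear) the REQ bit, then poll until the matching ARB bit is granted
 * (or released).  With the udelay(5) per iteration, the poll budget is
 * roughly NVRAM_TIMEOUT_COUNT * 10 * 5 us, stretched 100x on
 * emulation/FPGA.
 */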
9431
9432 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9433 {
9434         int port = BP_PORT(bp);
9435         int count, i;
9436         u32 val = 0;
9437
9438         /* adjust timeout for emulation/FPGA */
9439         count = NVRAM_TIMEOUT_COUNT;
9440         if (CHIP_REV_IS_SLOW(bp))
9441                 count *= 100;
9442
9443         /* relinquish nvram interface */
9444         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9445                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9446
9447         for (i = 0; i < count*10; i++) {
9448                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9449                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9450                         break;
9451
9452                 udelay(5);
9453         }
9454
9455         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9456                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9457                 return -EBUSY;
9458         }
9459
9460         return 0;
9461 }
9462
9463 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9464 {
9465         u32 val;
9466
9467         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9468
9469         /* enable both bits, even on read */
9470         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9471                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9472                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9473 }
9474
9475 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9476 {
9477         u32 val;
9478
9479         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9480
9481         /* disable both bits, even after read */
9482         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9483                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9484                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9485 }
9486
9487 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9488                                   u32 cmd_flags)
9489 {
9490         int count, i, rc;
9491         u32 val;
9492
9493         /* build the command word */
9494         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9495
9496         /* need to clear DONE bit separately */
9497         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9498
9499         /* address of the NVRAM to read from */
9500         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9501                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9502
9503         /* issue a read command */
9504         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9505
9506         /* adjust timeout for emulation/FPGA */
9507         count = NVRAM_TIMEOUT_COUNT;
9508         if (CHIP_REV_IS_SLOW(bp))
9509                 count *= 100;
9510
9511         /* wait for completion */
9512         *ret_val = 0;
9513         rc = -EBUSY;
9514         for (i = 0; i < count; i++) {
9515                 udelay(5);
9516                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9517
9518                 if (val & MCPR_NVM_COMMAND_DONE) {
9519                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9520                         /* we read nvram data in cpu order
9521                          * but ethtool sees it as an array of bytes;
9522                          * converting to big-endian restores the NVM byte order */
9523                         *ret_val = cpu_to_be32(val);
9524                         rc = 0;
9525                         break;
9526                 }
9527         }
9528
9529         return rc;
9530 }
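/* Worked example of the conversion above: if the NVM word reads back as
 * 0x11223344 on a little-endian CPU, cpu_to_be32() stores it in *ret_val
 * as the bytes 0x11 0x22 0x33 0x44 in memory -- exactly the byte-array
 * view ethtool expects.
 */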
9531
9532 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9533                             int buf_size)
9534 {
9535         int rc;
9536         u32 cmd_flags;
9537         __be32 val;
9538
9539         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9540                 DP(BNX2X_MSG_NVM,
9541                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9542                    offset, buf_size);
9543                 return -EINVAL;
9544         }
9545
9546         if (offset + buf_size > bp->common.flash_size) {
9547                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9548                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9549                    offset, buf_size, bp->common.flash_size);
9550                 return -EINVAL;
9551         }
9552
9553         /* request access to nvram interface */
9554         rc = bnx2x_acquire_nvram_lock(bp);
9555         if (rc)
9556                 return rc;
9557
9558         /* enable access to nvram interface */
9559         bnx2x_enable_nvram_access(bp);
9560
9561         /* read the first word(s) */
9562         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9563         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9564                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9565                 memcpy(ret_buf, &val, 4);
9566
9567                 /* advance to the next dword */
9568                 offset += sizeof(u32);
9569                 ret_buf += sizeof(u32);
9570                 buf_size -= sizeof(u32);
9571                 cmd_flags = 0;
9572         }
9573
9574         if (rc == 0) {
9575                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9576                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9577                 memcpy(ret_buf, &val, 4);
9578         }
9579
9580         /* disable access to nvram interface */
9581         bnx2x_disable_nvram_access(bp);
9582         bnx2x_release_nvram_lock(bp);
9583
9584         return rc;
9585 }
9586
9587 static int bnx2x_get_eeprom(struct net_device *dev,
9588                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9589 {
9590         struct bnx2x *bp = netdev_priv(dev);
9591         int rc;
9592
9593         if (!netif_running(dev))
9594                 return -EAGAIN;
9595
9596         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9597            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9598            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9599            eeprom->len, eeprom->len);
9600
9601         /* parameters already validated in ethtool_get_eeprom */
9602
9603         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9604
9605         return rc;
9606 }
9607
9608 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9609                                    u32 cmd_flags)
9610 {
9611         int count, i, rc;
9612
9613         /* build the command word */
9614         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9615
9616         /* need to clear DONE bit separately */
9617         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9618
9619         /* write the data */
9620         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9621
9622         /* address of the NVRAM to write to */
9623         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9624                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9625
9626         /* issue the write command */
9627         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9628
9629         /* adjust timeout for emulation/FPGA */
9630         count = NVRAM_TIMEOUT_COUNT;
9631         if (CHIP_REV_IS_SLOW(bp))
9632                 count *= 100;
9633
9634         /* wait for completion */
9635         rc = -EBUSY;
9636         for (i = 0; i < count; i++) {
9637                 udelay(5);
9638                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9639                 if (val & MCPR_NVM_COMMAND_DONE) {
9640                         rc = 0;
9641                         break;
9642                 }
9643         }
9644
9645         return rc;
9646 }
9647
9648 #define BYTE_OFFSET(offset)             (8 * ((offset) & 0x03))
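/* e.g. BYTE_OFFSET(0x7) = 8 * (0x7 & 0x03) = 24, i.e. byte 3 of its dword */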
9649
9650 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9651                               int buf_size)
9652 {
9653         int rc;
9654         u32 cmd_flags;
9655         u32 align_offset;
9656         __be32 val;
9657
9658         if (offset + buf_size > bp->common.flash_size) {
9659                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9660                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9661                    offset, buf_size, bp->common.flash_size);
9662                 return -EINVAL;
9663         }
9664
9665         /* request access to nvram interface */
9666         rc = bnx2x_acquire_nvram_lock(bp);
9667         if (rc)
9668                 return rc;
9669
9670         /* enable access to nvram interface */
9671         bnx2x_enable_nvram_access(bp);
9672
9673         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9674         align_offset = (offset & ~0x03);
9675         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9676
9677         if (rc == 0) {
9678                 val &= ~(0xff << BYTE_OFFSET(offset));
9679                 val |= (*data_buf << BYTE_OFFSET(offset));
9680
9681                 /* nvram data is returned as an array of bytes
9682                  * convert it back to cpu order */
9683                 val = be32_to_cpu(val);
9684
9685                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9686                                              cmd_flags);
9687         }
9688
9689         /* disable access to nvram interface */
9690         bnx2x_disable_nvram_access(bp);
9691         bnx2x_release_nvram_lock(bp);
9692
9693         return rc;
9694 }
9695
9696 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9697                              int buf_size)
9698 {
9699         int rc;
9700         u32 cmd_flags;
9701         u32 val;
9702         u32 written_so_far;
9703
9704         if (buf_size == 1)      /* single-byte write issued by ethtool */
9705                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9706
9707         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9708                 DP(BNX2X_MSG_NVM,
9709                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9710                    offset, buf_size);
9711                 return -EINVAL;
9712         }
9713
9714         if (offset + buf_size > bp->common.flash_size) {
9715                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9716                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9717                    offset, buf_size, bp->common.flash_size);
9718                 return -EINVAL;
9719         }
9720
9721         /* request access to nvram interface */
9722         rc = bnx2x_acquire_nvram_lock(bp);
9723         if (rc)
9724                 return rc;
9725
9726         /* enable access to nvram interface */
9727         bnx2x_enable_nvram_access(bp);
9728
9729         written_so_far = 0;
9730         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9731         while ((written_so_far < buf_size) && (rc == 0)) {
9732                 if (written_so_far == (buf_size - sizeof(u32)))
9733                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9734                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9735                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9736                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9737                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9738
9739                 memcpy(&val, data_buf, 4);
9740
9741                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9742
9743                 /* advance to the next dword */
9744                 offset += sizeof(u32);
9745                 data_buf += sizeof(u32);
9746                 written_so_far += sizeof(u32);
9747                 cmd_flags = 0;
9748         }
9749
9750         /* disable access to nvram interface */
9751         bnx2x_disable_nvram_access(bp);
9752         bnx2x_release_nvram_lock(bp);
9753
9754         return rc;
9755 }
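/* A worked example of the FIRST/LAST ladder above, assuming an
 * illustrative NVRAM_PAGE_SIZE of 256 bytes: writing 16 bytes at offset
 * 0xf8 issues the dword at 0xf8 with FIRST, 0xfc with LAST (it closes the
 * page since (0xfc + 4) % 256 == 0), 0x100 with FIRST (it opens the next
 * page) and 0x104 with LAST (final dword of the buffer).
 */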
9756
9757 static int bnx2x_set_eeprom(struct net_device *dev,
9758                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9759 {
9760         struct bnx2x *bp = netdev_priv(dev);
9761         int port = BP_PORT(bp);
9762         int rc = 0;
9763
9764         if (!netif_running(dev))
9765                 return -EAGAIN;
9766
9767         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9768            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9769            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9770            eeprom->len, eeprom->len);
9771
9772         /* parameters already validated in ethtool_set_eeprom */
9773
9774         /* PHY eeprom can be accessed only by the PMF */
9775         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9776             !bp->port.pmf)
9777                 return -EINVAL;
9778
9779         if (eeprom->magic == 0x50485950) {
9780                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9781                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9782
9783                 bnx2x_acquire_phy_lock(bp);
9784                 rc |= bnx2x_link_reset(&bp->link_params,
9785                                        &bp->link_vars, 0);
9786                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9787                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9788                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9789                                        MISC_REGISTERS_GPIO_HIGH, port);
9790                 bnx2x_release_phy_lock(bp);
9791                 bnx2x_link_report(bp);
9792
9793         } else if (eeprom->magic == 0x50485952) {
9794                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9795                 if (bp->state == BNX2X_STATE_OPEN) {
9796                         bnx2x_acquire_phy_lock(bp);
9797                         rc |= bnx2x_link_reset(&bp->link_params,
9798                                                &bp->link_vars, 1);
9799
9800                         rc |= bnx2x_phy_init(&bp->link_params,
9801                                              &bp->link_vars);
9802                         bnx2x_release_phy_lock(bp);
9803                         bnx2x_calc_fc_adv(bp);
9804                 }
9805         } else if (eeprom->magic == 0x53985943) {
9806                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9807                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9808                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9809                         u8 ext_phy_addr =
9810                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9811
9812                         /* DSP Remove Download Mode */
9813                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9814                                        MISC_REGISTERS_GPIO_LOW, port);
9815
9816                         bnx2x_acquire_phy_lock(bp);
9817
9818                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9819
9820                         /* wait 0.5 sec to allow it to run */
9821                         msleep(500);
9822                         bnx2x_ext_phy_hw_reset(bp, port);
9823                         msleep(500);
9824                         bnx2x_release_phy_lock(bp);
9825                 }
9826         } else
9827                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9828
9829         return rc;
9830 }
9831
9832 static int bnx2x_get_coalesce(struct net_device *dev,
9833                               struct ethtool_coalesce *coal)
9834 {
9835         struct bnx2x *bp = netdev_priv(dev);
9836
9837         memset(coal, 0, sizeof(struct ethtool_coalesce));
9838
9839         coal->rx_coalesce_usecs = bp->rx_ticks;
9840         coal->tx_coalesce_usecs = bp->tx_ticks;
9841
9842         return 0;
9843 }
9844
9845 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximum coalescing timeout in us */
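/* 0xf0 * 12 = 2880, so e.g. "ethtool -C ethX rx-usecs 5000" is silently
 * clamped to 2880 us by bnx2x_set_coalesce() below.
 */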
9846 static int bnx2x_set_coalesce(struct net_device *dev,
9847                               struct ethtool_coalesce *coal)
9848 {
9849         struct bnx2x *bp = netdev_priv(dev);
9850
9851         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9852         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9853                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9854
9855         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9856         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9857                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9858
9859         if (netif_running(dev))
9860                 bnx2x_update_coalesce(bp);
9861
9862         return 0;
9863 }
9864
9865 static void bnx2x_get_ringparam(struct net_device *dev,
9866                                 struct ethtool_ringparam *ering)
9867 {
9868         struct bnx2x *bp = netdev_priv(dev);
9869
9870         ering->rx_max_pending = MAX_RX_AVAIL;
9871         ering->rx_mini_max_pending = 0;
9872         ering->rx_jumbo_max_pending = 0;
9873
9874         ering->rx_pending = bp->rx_ring_size;
9875         ering->rx_mini_pending = 0;
9876         ering->rx_jumbo_pending = 0;
9877
9878         ering->tx_max_pending = MAX_TX_AVAIL;
9879         ering->tx_pending = bp->tx_ring_size;
9880 }
9881
9882 static int bnx2x_set_ringparam(struct net_device *dev,
9883                                struct ethtool_ringparam *ering)
9884 {
9885         struct bnx2x *bp = netdev_priv(dev);
9886         int rc = 0;
9887
9888         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9889             (ering->tx_pending > MAX_TX_AVAIL) ||
9890             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9891                 return -EINVAL;
9892
9893         bp->rx_ring_size = ering->rx_pending;
9894         bp->tx_ring_size = ering->tx_pending;
9895
9896         if (netif_running(dev)) {
9897                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9898                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9899         }
9900
9901         return rc;
9902 }
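/* The tx_pending lower bound above (MAX_SKB_FRAGS + 4) keeps room in the
 * TX ring for one maximally fragmented skb plus the extra start/parsing
 * BDs; a smaller ring could not fit even a single such packet.
 */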
9903
9904 static void bnx2x_get_pauseparam(struct net_device *dev,
9905                                  struct ethtool_pauseparam *epause)
9906 {
9907         struct bnx2x *bp = netdev_priv(dev);
9908
9909         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9910                            BNX2X_FLOW_CTRL_AUTO) &&
9911                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9912
9913         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9914                             BNX2X_FLOW_CTRL_RX);
9915         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9916                             BNX2X_FLOW_CTRL_TX);
9917
9918         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9919            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9920            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9921 }
9922
9923 static int bnx2x_set_pauseparam(struct net_device *dev,
9924                                 struct ethtool_pauseparam *epause)
9925 {
9926         struct bnx2x *bp = netdev_priv(dev);
9927
9928         if (IS_E1HMF(bp))
9929                 return 0;
9930
9931         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9932            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9933            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9934
9935         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9936
9937         if (epause->rx_pause)
9938                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9939
9940         if (epause->tx_pause)
9941                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9942
9943         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9944                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9945
9946         if (epause->autoneg) {
9947                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9948                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9949                         return -EINVAL;
9950                 }
9951
9952                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9953                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9954         }
9955
9956         DP(NETIF_MSG_LINK,
9957            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9958
9959         if (netif_running(dev)) {
9960                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9961                 bnx2x_link_set(bp);
9962         }
9963
9964         return 0;
9965 }
9966
9967 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9968 {
9969         struct bnx2x *bp = netdev_priv(dev);
9970         int changed = 0;
9971         int rc = 0;
9972
9973         /* TPA requires Rx CSUM offloading */
9974         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9975                 if (!disable_tpa) {
9976                         if (!(dev->features & NETIF_F_LRO)) {
9977                                 dev->features |= NETIF_F_LRO;
9978                                 bp->flags |= TPA_ENABLE_FLAG;
9979                                 changed = 1;
9980                         }
9981                 } else
9982                         rc = -EINVAL;
9983         } else if (dev->features & NETIF_F_LRO) {
9984                 dev->features &= ~NETIF_F_LRO;
9985                 bp->flags &= ~TPA_ENABLE_FLAG;
9986                 changed = 1;
9987         }
9988
9989         if (changed && netif_running(dev)) {
9990                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9991                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9992         }
9993
9994         return rc;
9995 }
9996
9997 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9998 {
9999         struct bnx2x *bp = netdev_priv(dev);
10000
10001         return bp->rx_csum;
10002 }
10003
10004 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10005 {
10006         struct bnx2x *bp = netdev_priv(dev);
10007         int rc = 0;
10008
10009         bp->rx_csum = data;
10010
10011         /* Disable TPA when Rx CSUM is disabled; otherwise all
10012            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10013         if (!data) {
10014                 u32 flags = ethtool_op_get_flags(dev);
10015
10016                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10017         }
10018
10019         return rc;
10020 }
10021
10022 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10023 {
10024         if (data) {
10025                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10026                 dev->features |= NETIF_F_TSO6;
10027         } else {
10028                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10029                 dev->features &= ~NETIF_F_TSO6;
10030         }
10031
10032         return 0;
10033 }
10034
10035 static const struct {
10036         char string[ETH_GSTRING_LEN];
10037 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10038         { "register_test (offline)" },
10039         { "memory_test (offline)" },
10040         { "loopback_test (offline)" },
10041         { "nvram_test (online)" },
10042         { "interrupt_test (online)" },
10043         { "link_test (online)" },
10044         { "idle check (online)" }
10045 };
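/* These are the strings "ethtool -t ethX [offline]" reports; their order
 * matches the buf[] indices filled in by bnx2x_self_test() below
 * (buf[0] register_test through buf[5] link_test).
 */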
10046
10047 static int bnx2x_test_registers(struct bnx2x *bp)
10048 {
10049         int idx, i, rc = -ENODEV;
10050         u32 wr_val = 0;
10051         int port = BP_PORT(bp);
10052         static const struct {
10053                 u32  offset0;
10054                 u32  offset1;
10055                 u32  mask;
10056         } reg_tbl[] = {
10057 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10058                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10059                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10060                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10061                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10062                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10063                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10064                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10065                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10066                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10067 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10068                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10069                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10070                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10071                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10072                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10073                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10074                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10075                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10076                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10077 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10078                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10079                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10080                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10081                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10082                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10083                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10084                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10085                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10086                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10087 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10088                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10089                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10090                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10091                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10092                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10093                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10094
10095                 { 0xffffffff, 0, 0x00000000 }
10096         };
10097
10098         if (!netif_running(bp->dev))
10099                 return rc;
10100
10101         /* Repeat the test twice:
10102            first writing 0x00000000, then writing 0xffffffff */
10103         for (idx = 0; idx < 2; idx++) {
10104
10105                 switch (idx) {
10106                 case 0:
10107                         wr_val = 0;
10108                         break;
10109                 case 1:
10110                         wr_val = 0xffffffff;
10111                         break;
10112                 }
10113
10114                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10115                         u32 offset, mask, save_val, val;
10116
10117                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10118                         mask = reg_tbl[i].mask;
10119
10120                         save_val = REG_RD(bp, offset);
10121
10122                         REG_WR(bp, offset, wr_val);
10123                         val = REG_RD(bp, offset);
10124
10125                         /* Restore the original register's value */
10126                         REG_WR(bp, offset, save_val);
10127
10128                         /* verify that the masked value matches what was written */
10129                         if ((val & mask) != (wr_val & mask))
10130                                 goto test_reg_exit;
10131                 }
10132         }
10133
10134         rc = 0;
10135
10136 test_reg_exit:
10137         return rc;
10138 }
10139
10140 static int bnx2x_test_memory(struct bnx2x *bp)
10141 {
10142         int i, j, rc = -ENODEV;
10143         u32 val;
10144         static const struct {
10145                 u32 offset;
10146                 int size;
10147         } mem_tbl[] = {
10148                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10149                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10150                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10151                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10152                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10153                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10154                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10155
10156                 { 0xffffffff, 0 }
10157         };
10158         static const struct {
10159                 char *name;
10160                 u32 offset;
10161                 u32 e1_mask;
10162                 u32 e1h_mask;
10163         } prty_tbl[] = {
10164                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10165                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10166                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10167                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10168                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10169                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10170
10171                 { NULL, 0xffffffff, 0, 0 }
10172         };
10173
10174         if (!netif_running(bp->dev))
10175                 return rc;
10176
10177         /* Go through all the memories */
10178         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10179                 for (j = 0; j < mem_tbl[i].size; j++)
10180                         REG_RD(bp, mem_tbl[i].offset + j*4);
10181
10182         /* Check the parity status */
10183         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10184                 val = REG_RD(bp, prty_tbl[i].offset);
10185                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10186                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10187                         DP(NETIF_MSG_HW,
10188                            "%s is 0x%x\n", prty_tbl[i].name, val);
10189                         goto test_mem_exit;
10190                 }
10191         }
10192
10193         rc = 0;
10194
10195 test_mem_exit:
10196         return rc;
10197 }
10198
10199 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10200 {
10201         int cnt = 1000;
10202
10203         if (link_up)
10204                 while (bnx2x_link_test(bp) && cnt--)
10205                         msleep(10);
10206 }
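/* Worst case this polls 1000 times with 10 ms sleeps, i.e. waits up to
 * roughly 10 seconds for the link to come back after a self-test reload.
 */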
10207
10208 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10209 {
10210         unsigned int pkt_size, num_pkts, i;
10211         struct sk_buff *skb;
10212         unsigned char *packet;
10213         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10214         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10215         u16 tx_start_idx, tx_idx;
10216         u16 rx_start_idx, rx_idx;
10217         u16 pkt_prod, bd_prod;
10218         struct sw_tx_bd *tx_buf;
10219         struct eth_tx_start_bd *tx_start_bd;
10220         struct eth_tx_parse_bd *pbd = NULL;
10221         dma_addr_t mapping;
10222         union eth_rx_cqe *cqe;
10223         u8 cqe_fp_flags;
10224         struct sw_rx_bd *rx_buf;
10225         u16 len;
10226         int rc = -ENODEV;
10227
10228         /* check the loopback mode */
10229         switch (loopback_mode) {
10230         case BNX2X_PHY_LOOPBACK:
10231                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10232                         return -EINVAL;
10233                 break;
10234         case BNX2X_MAC_LOOPBACK:
10235                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10236                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10237                 break;
10238         default:
10239                 return -EINVAL;
10240         }
10241
10242         /* prepare the loopback packet */
10243         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10244                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10245         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10246         if (!skb) {
10247                 rc = -ENOMEM;
10248                 goto test_loopback_exit;
10249         }
10250         packet = skb_put(skb, pkt_size);
10251         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10252         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10253         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10254         for (i = ETH_HLEN; i < pkt_size; i++)
10255                 packet[i] = (unsigned char) (i & 0xff);
10256
10257         /* send the loopback packet */
10258         num_pkts = 0;
10259         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10260         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10261
10262         pkt_prod = fp_tx->tx_pkt_prod++;
10263         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10264         tx_buf->first_bd = fp_tx->tx_bd_prod;
10265         tx_buf->skb = skb;
10266         tx_buf->flags = 0;
10267
10268         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10269         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10270         mapping = pci_map_single(bp->pdev, skb->data,
10271                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10272         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10273         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10274         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10275         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10276         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10277         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10278         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10279                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10280
10281         /* turn on parsing and get a BD */
10282         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10283         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10284
10285         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10286
10287         wmb();
10288
10289         fp_tx->tx_db.data.prod += 2;
10290         barrier();
10291         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10292
10293         mmiowb();
10294
10295         num_pkts++;
10296         fp_tx->tx_bd_prod += 2; /* start + pbd */
10297
10298         udelay(100);
10299
10300         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10301         if (tx_idx != tx_start_idx + num_pkts)
10302                 goto test_loopback_exit;
10303
10304         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10305         if (rx_idx != rx_start_idx + num_pkts)
10306                 goto test_loopback_exit;
10307
10308         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10309         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
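        /* "FALGS" is the actual (misspelled) name in the HSI header */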
10310         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10311                 goto test_loopback_rx_exit;
10312
10313         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10314         if (len != pkt_size)
10315                 goto test_loopback_rx_exit;
10316
10317         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10318         skb = rx_buf->skb;
10319         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10320         for (i = ETH_HLEN; i < pkt_size; i++)
10321                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10322                         goto test_loopback_rx_exit;
10323
10324         rc = 0;
10325
10326 test_loopback_rx_exit:
10327
10328         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10329         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10330         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10331         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10332
10333         /* Update producers */
10334         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10335                              fp_rx->rx_sge_prod);
10336
10337 test_loopback_exit:
10338         bp->link_params.loopback_mode = LOOPBACK_NONE;
10339
10340         return rc;
10341 }
10342
10343 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10344 {
10345         int rc = 0, res;
10346
10347         if (!netif_running(bp->dev))
10348                 return BNX2X_LOOPBACK_FAILED;
10349
10350         bnx2x_netif_stop(bp, 1);
10351         bnx2x_acquire_phy_lock(bp);
10352
10353         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10354         if (res) {
10355                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10356                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10357         }
10358
10359         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10360         if (res) {
10361                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10362                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10363         }
10364
10365         bnx2x_release_phy_lock(bp);
10366         bnx2x_netif_start(bp);
10367
10368         return rc;
10369 }
10370
10371 #define CRC32_RESIDUAL                  0xdebb20e3
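/* Standard CRC-32 property: running the CRC over a block that carries its
 * own little-endian CRC in the last four bytes always yields the constant
 * residual 0xdebb20e3.  Each nvram_tbl[] region below ends in such a CRC,
 * so ether_crc_le() over the whole region must return this value.
 */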
10372
10373 static int bnx2x_test_nvram(struct bnx2x *bp)
10374 {
10375         static const struct {
10376                 int offset;
10377                 int size;
10378         } nvram_tbl[] = {
10379                 {     0,  0x14 }, /* bootstrap */
10380                 {  0x14,  0xec }, /* dir */
10381                 { 0x100, 0x350 }, /* manuf_info */
10382                 { 0x450,  0xf0 }, /* feature_info */
10383                 { 0x640,  0x64 }, /* upgrade_key_info */
10384                 { 0x6a4,  0x64 },
10385                 { 0x708,  0x70 }, /* manuf_key_info */
10386                 { 0x778,  0x70 },
10387                 {     0,     0 }
10388         };
10389         __be32 buf[0x350 / 4];
10390         u8 *data = (u8 *)buf;
10391         int i, rc;
10392         u32 magic, crc;
10393
10394         rc = bnx2x_nvram_read(bp, 0, data, 4);
10395         if (rc) {
10396                 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
10397                 goto test_nvram_exit;
10398         }
10399
10400         magic = be32_to_cpu(buf[0]);
10401         if (magic != 0x669955aa) {
10402                 DP(NETIF_MSG_PROBE, "wrong magic value (0x%08x)\n", magic);
10403                 rc = -ENODEV;
10404                 goto test_nvram_exit;
10405         }
10406
10407         for (i = 0; nvram_tbl[i].size; i++) {
10408
10409                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10410                                       nvram_tbl[i].size);
10411                 if (rc) {
10412                         DP(NETIF_MSG_PROBE,
10413                            "nvram_tbl[%d] read failed (rc %d)\n", i, rc);
10414                         goto test_nvram_exit;
10415                 }
10416
10417                 crc = ether_crc_le(nvram_tbl[i].size, data);
10418                 if (crc != CRC32_RESIDUAL) {
10419                         DP(NETIF_MSG_PROBE,
10420                            "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
10421                         rc = -ENODEV;
10422                         goto test_nvram_exit;
10423                 }
10424         }
10425
10426 test_nvram_exit:
10427         return rc;
10428 }
10429
10430 static int bnx2x_test_intr(struct bnx2x *bp)
10431 {
10432         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10433         int i, rc;
10434
10435         if (!netif_running(bp->dev))
10436                 return -ENODEV;
10437
10438         config->hdr.length = 0;
10439         if (CHIP_IS_E1(bp))
10440                 /* use last unicast entries */
10441                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
10442         else
10443                 config->hdr.offset = BP_FUNC(bp);
10444         config->hdr.client_id = bp->fp->cl_id;
10445         config->hdr.reserved1 = 0;
10446
10447         bp->set_mac_pending++;
10448         smp_wmb();
10449         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10450                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10451                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10452         if (rc == 0) {
10453                 for (i = 0; i < 10; i++) {
10454                         if (!bp->set_mac_pending)
10455                                 break;
10456                         smp_rmb();
10457                         msleep_interruptible(10);
10458                 }
10459                 if (i == 10)
10460                         rc = -ENODEV;
10461         }
10462
10463         return rc;
10464 }
10465
10466 static void bnx2x_self_test(struct net_device *dev,
10467                             struct ethtool_test *etest, u64 *buf)
10468 {
10469         struct bnx2x *bp = netdev_priv(dev);
10470
10471         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10472
10473         if (!netif_running(dev))
10474                 return;
10475
10476         /* offline tests are not supported in MF mode */
10477         if (IS_E1HMF(bp))
10478                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10479
10480         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10481                 int port = BP_PORT(bp);
10482                 u32 val;
10483                 u8 link_up;
10484
10485                 /* save current value of input enable for TX port IF */
10486                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10487                 /* disable input for TX port IF */
10488                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10489
10490                 link_up = (bnx2x_link_test(bp) == 0);
10491                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10492                 bnx2x_nic_load(bp, LOAD_DIAG);
10493                 /* wait until link state is restored */
10494                 bnx2x_wait_for_link(bp, link_up);
10495
10496                 if (bnx2x_test_registers(bp) != 0) {
10497                         buf[0] = 1;
10498                         etest->flags |= ETH_TEST_FL_FAILED;
10499                 }
10500                 if (bnx2x_test_memory(bp) != 0) {
10501                         buf[1] = 1;
10502                         etest->flags |= ETH_TEST_FL_FAILED;
10503                 }
10504                 buf[2] = bnx2x_test_loopback(bp, link_up);
10505                 if (buf[2] != 0)
10506                         etest->flags |= ETH_TEST_FL_FAILED;
10507
10508                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10509
10510                 /* restore input for TX port IF */
10511                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10512
10513                 bnx2x_nic_load(bp, LOAD_NORMAL);
10514                 /* wait until link state is restored */
10515                 bnx2x_wait_for_link(bp, link_up);
10516         }
10517         if (bnx2x_test_nvram(bp) != 0) {
10518                 buf[3] = 1;
10519                 etest->flags |= ETH_TEST_FL_FAILED;
10520         }
10521         if (bnx2x_test_intr(bp) != 0) {
10522                 buf[4] = 1;
10523                 etest->flags |= ETH_TEST_FL_FAILED;
10524         }
10525         if (bp->port.pmf)
10526                 if (bnx2x_link_test(bp) != 0) {
10527                         buf[5] = 1;
10528                         etest->flags |= ETH_TEST_FL_FAILED;
10529                 }
10530
10531 #ifdef BNX2X_EXTRA_DEBUG
10532         bnx2x_panic_dump(bp);
10533 #endif
10534 }
10535
10536 static const struct {
10537         long offset;
10538         int size;
10539         u8 string[ETH_GSTRING_LEN];
10540 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10541 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10542         { Q_STATS_OFFSET32(error_bytes_received_hi),
10543                                                 8, "[%d]: rx_error_bytes" },
10544         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10545                                                 8, "[%d]: rx_ucast_packets" },
10546         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10547                                                 8, "[%d]: rx_mcast_packets" },
10548         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10549                                                 8, "[%d]: rx_bcast_packets" },
10550         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10551         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10552                                          4, "[%d]: rx_phy_ip_err_discards"},
10553         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10554                                          4, "[%d]: rx_skb_alloc_discard" },
10555         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10556
10557 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10558         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10559                                                         8, "[%d]: tx_packets" }
10560 };
10561
10562 static const struct {
10563         long offset;
10564         int size;
10565         u32 flags;
10566 #define STATS_FLAGS_PORT                1
10567 #define STATS_FLAGS_FUNC                2
10568 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10569         u8 string[ETH_GSTRING_LEN];
10570 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10571 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10572                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10573         { STATS_OFFSET32(error_bytes_received_hi),
10574                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10575         { STATS_OFFSET32(total_unicast_packets_received_hi),
10576                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10577         { STATS_OFFSET32(total_multicast_packets_received_hi),
10578                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10579         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10580                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10581         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10582                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10583         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10584                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10585         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10586                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10587         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10588                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10589 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10590                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10591         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10592                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10593         { STATS_OFFSET32(no_buff_discard_hi),
10594                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10595         { STATS_OFFSET32(mac_filter_discard),
10596                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10597         { STATS_OFFSET32(xxoverflow_discard),
10598                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10599         { STATS_OFFSET32(brb_drop_hi),
10600                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10601         { STATS_OFFSET32(brb_truncate_hi),
10602                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10603         { STATS_OFFSET32(pause_frames_received_hi),
10604                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10605         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10606                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10607         { STATS_OFFSET32(nig_timer_max),
10608                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10609 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10610                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10611         { STATS_OFFSET32(rx_skb_alloc_failed),
10612                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10613         { STATS_OFFSET32(hw_csum_err),
10614                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10615
10616         { STATS_OFFSET32(total_bytes_transmitted_hi),
10617                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10618         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10619                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10620         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10621                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10622         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10623                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10624         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10625                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10626         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10627                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10628         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10629                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10630 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10631                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10632         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10633                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10634         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10635                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10636         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10637                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10638         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10639                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10640         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10641                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10642         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10643                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10644         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10645                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10646         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10647                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10648         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10649                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10650 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10651                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10652         { STATS_OFFSET32(pause_frames_sent_hi),
10653                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10654 };
10655
10656 #define IS_PORT_STAT(i) \
10657         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10658 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10659 #define IS_E1HMF_MODE_STAT(bp) \
10660                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
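/* e.g. a stat tagged STATS_FLAGS_BOTH satisfies IS_FUNC_STAT() but not
 * IS_PORT_STAT(), which matches only port-exclusive stats.
 */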
10661
10662 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10663 {
10664         struct bnx2x *bp = netdev_priv(dev);
10665         int i, num_stats;
10666
10667         switch (stringset) {
10668         case ETH_SS_STATS:
10669                 if (is_multi(bp)) {
10670                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10671                         if (!IS_E1HMF_MODE_STAT(bp))
10672                                 num_stats += BNX2X_NUM_STATS;
10673                 } else {
10674                         if (IS_E1HMF_MODE_STAT(bp)) {
10675                                 num_stats = 0;
10676                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10677                                         if (IS_FUNC_STAT(i))
10678                                                 num_stats++;
10679                         } else
10680                                 num_stats = BNX2X_NUM_STATS;
10681                 }
10682                 return num_stats;
10683
10684         case ETH_SS_TEST:
10685                 return BNX2X_NUM_TESTS;
10686
10687         default:
10688                 return -EINVAL;
10689         }
10690 }
10691
10692 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10693 {
10694         struct bnx2x *bp = netdev_priv(dev);
10695         int i, j, k;
10696
10697         switch (stringset) {
10698         case ETH_SS_STATS:
10699                 if (is_multi(bp)) {
10700                         k = 0;
10701                         for_each_queue(bp, i) {
10702                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10703                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10704                                                 bnx2x_q_stats_arr[j].string, i);
10705                                 k += BNX2X_NUM_Q_STATS;
10706                         }
10707                         if (IS_E1HMF_MODE_STAT(bp))
10708                                 break;
10709                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10710                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10711                                        bnx2x_stats_arr[j].string);
10712                 } else {
10713                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10714                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10715                                         continue;
10716                                 strcpy(buf + j*ETH_GSTRING_LEN,
10717                                        bnx2x_stats_arr[i].string);
10718                                 j++;
10719                         }
10720                 }
10721                 break;
10722
10723         case ETH_SS_TEST:
10724                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10725                 break;
10726         }
10727 }
10728
10729 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10730                                     struct ethtool_stats *stats, u64 *buf)
10731 {
10732         struct bnx2x *bp = netdev_priv(dev);
10733         u32 *hw_stats, *offset;
10734         int i, j, k;
10735
10736         if (is_multi(bp)) {
10737                 k = 0;
10738                 for_each_queue(bp, i) {
10739                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10740                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10741                                 if (bnx2x_q_stats_arr[j].size == 0) {
10742                                         /* skip this counter */
10743                                         buf[k + j] = 0;
10744                                         continue;
10745                                 }
10746                                 offset = (hw_stats +
10747                                           bnx2x_q_stats_arr[j].offset);
10748                                 if (bnx2x_q_stats_arr[j].size == 4) {
10749                                         /* 4-byte counter */
10750                                         buf[k + j] = (u64) *offset;
10751                                         continue;
10752                                 }
10753                                 /* 8-byte counter */
10754                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10755                         }
10756                         k += BNX2X_NUM_Q_STATS;
10757                 }
10758                 if (IS_E1HMF_MODE_STAT(bp))
10759                         return;
10760                 hw_stats = (u32 *)&bp->eth_stats;
10761                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10762                         if (bnx2x_stats_arr[j].size == 0) {
10763                                 /* skip this counter */
10764                                 buf[k + j] = 0;
10765                                 continue;
10766                         }
10767                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10768                         if (bnx2x_stats_arr[j].size == 4) {
10769                                 /* 4-byte counter */
10770                                 buf[k + j] = (u64) *offset;
10771                                 continue;
10772                         }
10773                         /* 8-byte counter */
10774                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10775                 }
10776         } else {
10777                 hw_stats = (u32 *)&bp->eth_stats;
10778                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10779                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10780                                 continue;
10781                         if (bnx2x_stats_arr[i].size == 0) {
10782                                 /* skip this counter */
10783                                 buf[j] = 0;
10784                                 j++;
10785                                 continue;
10786                         }
10787                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10788                         if (bnx2x_stats_arr[i].size == 4) {
10789                                 /* 4-byte counter */
10790                                 buf[j] = (u64) *offset;
10791                                 j++;
10792                                 continue;
10793                         }
10794                         /* 8-byte counter */
10795                         buf[j] = HILO_U64(*offset, *(offset + 1));
10796                         j++;
10797                 }
10798         }
10799 }
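
/* Editor's note: a minimal sketch (illustrative, not part of the driver)
 * of how the 8-byte counters above are assembled.  HILO_U64() is assumed
 * to expand to (((u64)(hi) << 32) + (lo)); the high 32 bits are stored
 * first in the statistics block, hence HILO_U64(*offset, *(offset + 1)).
 */
#if 0   /* example only */
static u64 example_read_stat64(const u32 *hw_stats, int dword_offset)
{
        const u32 *p = hw_stats + dword_offset;

        return HILO_U64(p[0], p[1]);    /* p[0] = high word, p[1] = low */
}
#endif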
10800
10801 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10802 {
10803         struct bnx2x *bp = netdev_priv(dev);
10804         int i;
10805
10806         if (!netif_running(dev))
10807                 return 0;
10808
10809         if (!bp->port.pmf)
10810                 return 0;
10811
10812         if (data == 0)
10813                 data = 2;
10814
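        /* Editor's note: each iteration below toggles the LED and sleeps for
         * about 500ms, so (data * 2) iterations blink the LED for roughly
         * 'data' seconds.
         */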
10815         for (i = 0; i < (data * 2); i++) {
10816                 if ((i % 2) == 0)
10817                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10818                                       SPEED_1000);
10819                 else
10820                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10821
10822                 msleep_interruptible(500);
10823                 if (signal_pending(current))
10824                         break;
10825         }
10826
10827         if (bp->link_vars.link_up)
10828                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10829                               bp->link_vars.line_speed);
10830
10831         return 0;
10832 }
10833
10834 static const struct ethtool_ops bnx2x_ethtool_ops = {
10835         .get_settings           = bnx2x_get_settings,
10836         .set_settings           = bnx2x_set_settings,
10837         .get_drvinfo            = bnx2x_get_drvinfo,
10838         .get_regs_len           = bnx2x_get_regs_len,
10839         .get_regs               = bnx2x_get_regs,
10840         .get_wol                = bnx2x_get_wol,
10841         .set_wol                = bnx2x_set_wol,
10842         .get_msglevel           = bnx2x_get_msglevel,
10843         .set_msglevel           = bnx2x_set_msglevel,
10844         .nway_reset             = bnx2x_nway_reset,
10845         .get_link               = bnx2x_get_link,
10846         .get_eeprom_len         = bnx2x_get_eeprom_len,
10847         .get_eeprom             = bnx2x_get_eeprom,
10848         .set_eeprom             = bnx2x_set_eeprom,
10849         .get_coalesce           = bnx2x_get_coalesce,
10850         .set_coalesce           = bnx2x_set_coalesce,
10851         .get_ringparam          = bnx2x_get_ringparam,
10852         .set_ringparam          = bnx2x_set_ringparam,
10853         .get_pauseparam         = bnx2x_get_pauseparam,
10854         .set_pauseparam         = bnx2x_set_pauseparam,
10855         .get_rx_csum            = bnx2x_get_rx_csum,
10856         .set_rx_csum            = bnx2x_set_rx_csum,
10857         .get_tx_csum            = ethtool_op_get_tx_csum,
10858         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10859         .set_flags              = bnx2x_set_flags,
10860         .get_flags              = ethtool_op_get_flags,
10861         .get_sg                 = ethtool_op_get_sg,
10862         .set_sg                 = ethtool_op_set_sg,
10863         .get_tso                = ethtool_op_get_tso,
10864         .set_tso                = bnx2x_set_tso,
10865         .self_test              = bnx2x_self_test,
10866         .get_sset_count         = bnx2x_get_sset_count,
10867         .get_strings            = bnx2x_get_strings,
10868         .phys_id                = bnx2x_phys_id,
10869         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10870 };
10871
10872 /* end of ethtool_ops */
10873
10874 /****************************************************************************
10875 * General service functions
10876 ****************************************************************************/
10877
10878 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10879 {
10880         u16 pmcsr;
10881
10882         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10883
10884         switch (state) {
10885         case PCI_D0:
10886                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10887                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10888                                        PCI_PM_CTRL_PME_STATUS));
10889
10890                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10891                         /* delay required during transition out of D3hot */
10892                         msleep(20);
10893                 break;
10894
10895         case PCI_D3hot:
10896                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10897                 pmcsr |= 3;
10898
10899                 if (bp->wol)
10900                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10901
10902                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10903                                       pmcsr);
10904
10905                 /* No more memory access after this point until
10906                  * device is brought back to D0.
10907                  */
10908                 break;
10909
10910         default:
10911                 return -EINVAL;
10912         }
10913         return 0;
10914 }
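
/* Editor's note: PCI_PM_CTRL_STATE_MASK selects the power-state field of
 * the PMCSR register; writing 3 requests D3hot, while clearing the field
 * (with the PME status bit written back) returns the device to D0.  The
 * 20ms sleep covers the D3hot -> D0 transition delay the PCI PM spec
 * requires (10ms minimum).
 */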
10915
10916 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10917 {
10918         u16 rx_cons_sb;
10919
10920         /* Tell compiler that status block fields can change */
10921         barrier();
10922         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10923         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10924                 rx_cons_sb++;
10925         return (fp->rx_comp_cons != rx_cons_sb);
10926 }
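
/* Editor's note: the "+ 1" above skips the last entry of an RCQ page,
 * which (as elsewhere in this driver) is assumed to hold a next-page
 * pointer rather than a completion, so the consumer index never points
 * at it.
 */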
10927
10928 /*
10929  * net_device service functions
10930  */
10931
10932 static int bnx2x_poll(struct napi_struct *napi, int budget)
10933 {
10934         int work_done = 0;
10935         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10936                                                  napi);
10937         struct bnx2x *bp = fp->bp;
10938
10939         while (1) {
10940 #ifdef BNX2X_STOP_ON_ERROR
10941                 if (unlikely(bp->panic)) {
10942                         napi_complete(napi);
10943                         return 0;
10944                 }
10945 #endif
10946
10947                 if (bnx2x_has_tx_work(fp))
10948                         bnx2x_tx_int(fp);
10949
10950                 if (bnx2x_has_rx_work(fp)) {
10951                         work_done += bnx2x_rx_int(fp, budget - work_done);
10952
10953                         /* must not complete if we consumed full budget */
10954                         if (work_done >= budget)
10955                                 break;
10956                 }
10957
10958                 /* Fall out from the NAPI loop if needed */
10959                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10960                         bnx2x_update_fpsb_idx(fp);
10961                 /* bnx2x_has_rx_work() reads the status block, thus we need
10962                  * to ensure that status block indices have been actually read
10963                  * (bnx2x_update_fpsb_idx) prior to this check
10964                  * (bnx2x_has_rx_work) so that we won't write the "newer"
10965                  * value of the status block to IGU (if there was a DMA right
10966                  * after bnx2x_has_rx_work and if there is no rmb, the memory
10967                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
10968                  * before bnx2x_ack_sb). In this case there will never be
10969                  * another interrupt until there is another update of the
10970                  * status block, while there is still unhandled work.
10971                  */
10972                         rmb();
10973
10974                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10975                                 napi_complete(napi);
10976                                 /* Re-enable interrupts */
10977                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10978                                              le16_to_cpu(fp->fp_c_idx),
10979                                              IGU_INT_NOP, 1);
10980                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10981                                              le16_to_cpu(fp->fp_u_idx),
10982                                              IGU_INT_ENABLE, 1);
10983                                 break;
10984                         }
10985                 }
10986         }
10987
10988         return work_done;
10989 }
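
/* Editor's note: bnx2x_poll() follows the standard NAPI contract; a
 * minimal sketch of that pattern (hypothetical example_* helpers,
 * illustrative only):
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_process_rx(napi, budget);

        if (work_done < budget) {
                /* all work handled: leave polling and re-arm interrupts */
                napi_complete(napi);
                example_enable_irqs(napi);
        }
        return work_done;       /* == budget keeps us on the poll list */
}
#endif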
10990
10991
10992 /* We split the first BD into header and data BDs
10993  * to ease the pain of our fellow microcode engineers;
10994  * we use one mapping for both BDs.
10995  * So far this has only been observed to happen
10996  * in Other Operating Systems(TM).
10997  */
10998 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10999                                    struct bnx2x_fastpath *fp,
11000                                    struct sw_tx_bd *tx_buf,
11001                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
11002                                    u16 bd_prod, int nbd)
11003 {
11004         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11005         struct eth_tx_bd *d_tx_bd;
11006         dma_addr_t mapping;
11007         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11008
11009         /* first fix first BD */
11010         h_tx_bd->nbd = cpu_to_le16(nbd);
11011         h_tx_bd->nbytes = cpu_to_le16(hlen);
11012
11013         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11014            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11015            h_tx_bd->addr_lo, h_tx_bd->nbd);
11016
11017         /* now get a new data BD
11018          * (after the pbd) and fill it */
11019         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11020         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11021
11022         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11023                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11024
11025         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11026         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11027         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11028
11029         /* this marks the BD as one that has no individual mapping */
11030         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11031
11032         DP(NETIF_MSG_TX_QUEUED,
11033            "TSO split data size is %d (%x:%x)\n",
11034            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11035
11036         /* update tx_bd */
11037         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11038
11039         return bd_prod;
11040 }
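
/* Editor's note: after the split above a single DMA mapping backs both
 * BDs: the header BD covers bytes [0, hlen) of the original buffer and
 * the new data BD covers [hlen, old_len), i.e. addr + hlen for
 * old_len - hlen bytes.  BNX2X_TSO_SPLIT_BD marks the buffer so the
 * completion path unmaps it only once.
 */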
11041
11042 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11043 {
11044         if (fix > 0)
11045                 csum = (u16) ~csum_fold(csum_sub(csum,
11046                                 csum_partial(t_header - fix, fix, 0)));
11047
11048         else if (fix < 0)
11049                 csum = (u16) ~csum_fold(csum_add(csum,
11050                                 csum_partial(t_header, -fix, 0)));
11051
11052         return swab16(csum);
11053 }
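
/* Editor's note: a worked example of the fixup above, assuming the usual
 * one's-complement csum helpers.  If the stack checksummed from 'fix'
 * bytes before the transport header, csum_sub() removes the partial sum
 * of those extra bytes; csum_fold() then collapses the 32-bit sum to
 * 16 bits and swab16() gives the byte order the HW expects:
 *
 *   fix =  2:  csum' = swab16(~fold(csum - partial(t_header - 2, 2)))
 *   fix = -2:  csum' = swab16(~fold(csum + partial(t_header, 2)))
 */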
11054
11055 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11056 {
11057         u32 rc;
11058
11059         if (skb->ip_summed != CHECKSUM_PARTIAL)
11060                 rc = XMIT_PLAIN;
11061
11062         else {
11063                 if (skb->protocol == htons(ETH_P_IPV6)) {
11064                         rc = XMIT_CSUM_V6;
11065                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11066                                 rc |= XMIT_CSUM_TCP;
11067
11068                 } else {
11069                         rc = XMIT_CSUM_V4;
11070                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11071                                 rc |= XMIT_CSUM_TCP;
11072                 }
11073         }
11074
11075         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11076                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11077
11078         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11079                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11080
11081         return rc;
11082 }
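
/* Editor's note (illustrative): a TSO IPv4/TCP skb thus yields
 * XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP, while an skb with
 * ip_summed != CHECKSUM_PARTIAL stays XMIT_PLAIN.
 */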
11083
11084 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11085 /* Check if the packet requires linearization (packet is too fragmented).
11086    No need to check fragmentation if page size > 8K (there will be no
11087    violation of FW restrictions). */
11088 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11089                              u32 xmit_type)
11090 {
11091         int to_copy = 0;
11092         int hlen = 0;
11093         int first_bd_sz = 0;
11094
11095         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11096         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11097
11098                 if (xmit_type & XMIT_GSO) {
11099                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11100                         /* Check if LSO packet needs to be copied:
11101                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11102                         int wnd_size = MAX_FETCH_BD - 3;
11103                         /* Number of windows to check */
11104                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11105                         int wnd_idx = 0;
11106                         int frag_idx = 0;
11107                         u32 wnd_sum = 0;
11108
11109                         /* Headers length */
11110                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11111                                 tcp_hdrlen(skb);
11112
11113                         /* Amount of data (w/o headers) on linear part of SKB */
11114                         first_bd_sz = skb_headlen(skb) - hlen;
11115
11116                         wnd_sum  = first_bd_sz;
11117
11118                         /* Calculate the first sum - it's special */
11119                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11120                                 wnd_sum +=
11121                                         skb_shinfo(skb)->frags[frag_idx].size;
11122
11123                         /* If there was data in the linear part of the skb - check it */
11124                         if (first_bd_sz > 0) {
11125                                 if (unlikely(wnd_sum < lso_mss)) {
11126                                         to_copy = 1;
11127                                         goto exit_lbl;
11128                                 }
11129
11130                                 wnd_sum -= first_bd_sz;
11131                         }
11132
11133                         /* Others are easier: run through the frag list and
11134                            check all windows */
11135                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11136                                 wnd_sum +=
11137                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11138
11139                                 if (unlikely(wnd_sum < lso_mss)) {
11140                                         to_copy = 1;
11141                                         break;
11142                                 }
11143                                 wnd_sum -=
11144                                         skb_shinfo(skb)->frags[wnd_idx].size;
11145                         }
11146                 } else {
11147                         /* in the non-LSO case a too fragmented packet
11148                            should always be linearized */
11149                         to_copy = 1;
11150                 }
11151         }
11152
11153 exit_lbl:
11154         if (unlikely(to_copy))
11155                 DP(NETIF_MSG_TX_QUEUED,
11156                    "Linearization IS REQUIRED for %s packet. "
11157                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11158                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11159                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11160
11161         return to_copy;
11162 }
11163 #endif
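
/* Editor's note: a worked example of the sliding-window check above.
 * Assuming (for illustration) MAX_FETCH_BD == 13, wnd_size is 10, so
 * every run of 10 consecutive BDs of an LSO packet must carry at least
 * one MSS of payload; any window summing below lso_mss forces
 * skb_linearize().
 */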
11164
11165 /* called with netif_tx_lock
11166  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11167  * netif_wake_queue()
11168  */
11169 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11170 {
11171         struct bnx2x *bp = netdev_priv(dev);
11172         struct bnx2x_fastpath *fp;
11173         struct netdev_queue *txq;
11174         struct sw_tx_bd *tx_buf;
11175         struct eth_tx_start_bd *tx_start_bd;
11176         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11177         struct eth_tx_parse_bd *pbd = NULL;
11178         u16 pkt_prod, bd_prod;
11179         int nbd, fp_index;
11180         dma_addr_t mapping;
11181         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11182         int i;
11183         u8 hlen = 0;
11184         __le16 pkt_size = 0;
11185
11186 #ifdef BNX2X_STOP_ON_ERROR
11187         if (unlikely(bp->panic))
11188                 return NETDEV_TX_BUSY;
11189 #endif
11190
11191         fp_index = skb_get_queue_mapping(skb);
11192         txq = netdev_get_tx_queue(dev, fp_index);
11193
11194         fp = &bp->fp[fp_index];
11195
11196         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11197                 fp->eth_q_stats.driver_xoff++;
11198                 netif_tx_stop_queue(txq);
11199                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11200                 return NETDEV_TX_BUSY;
11201         }
11202
11203         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11204            "  gso type %x  xmit_type %x\n",
11205            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11206            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11207
11208 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11209         /* First, check if we need to linearize the skb (due to FW
11210            restrictions). No need to check fragmentation if page size > 8K
11211            (there will be no violation of FW restrictions) */
11212         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11213                 /* Statistics of linearization */
11214                 bp->lin_cnt++;
11215                 if (skb_linearize(skb) != 0) {
11216                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11217                            "silently dropping this SKB\n");
11218                         dev_kfree_skb_any(skb);
11219                         return NETDEV_TX_OK;
11220                 }
11221         }
11222 #endif
11223
11224         /*
11225         Please read carefully. First we use one BD which we mark as start,
11226         then we have a parsing info BD (used for TSO or xsum),
11227         and only then we have the rest of the TSO BDs.
11228         (don't forget to mark the last one as last,
11229         and to unmap only AFTER you write to the BD ...)
11230         And above all, all PBD sizes are in words - NOT DWORDS!
11231         */
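        /* Editor's note: the resulting BD chain for one packet is,
         * schematically:
         *
         *   start BD -> parse BD (PBD) -> [split header/data BDs] -> frag BDs
         *
         * with nbd counting every BD that belongs to the packet.
         */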
11232
11233         pkt_prod = fp->tx_pkt_prod++;
11234         bd_prod = TX_BD(fp->tx_bd_prod);
11235
11236         /* get a tx_buf and first BD */
11237         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11238         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11239
11240         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11241         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11242                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11243         /* header nbd */
11244         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11245
11246         /* remember the first BD of the packet */
11247         tx_buf->first_bd = fp->tx_bd_prod;
11248         tx_buf->skb = skb;
11249         tx_buf->flags = 0;
11250
11251         DP(NETIF_MSG_TX_QUEUED,
11252            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11253            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11254
11255 #ifdef BCM_VLAN
11256         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11257             (bp->flags & HW_VLAN_TX_FLAG)) {
11258                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11259                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11260         } else
11261 #endif
11262                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11263
11264         /* turn on parsing and get a BD */
11265         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11266         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11267
11268         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11269
11270         if (xmit_type & XMIT_CSUM) {
11271                 hlen = (skb_network_header(skb) - skb->data) / 2;
11272
11273                 /* for now NS flag is not used in Linux */
11274                 pbd->global_data =
11275                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11276                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11277
11278                 pbd->ip_hlen = (skb_transport_header(skb) -
11279                                 skb_network_header(skb)) / 2;
11280
11281                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11282
11283                 pbd->total_hlen = cpu_to_le16(hlen);
11284                 hlen = hlen*2;
11285
11286                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11287
11288                 if (xmit_type & XMIT_CSUM_V4)
11289                         tx_start_bd->bd_flags.as_bitfield |=
11290                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11291                 else
11292                         tx_start_bd->bd_flags.as_bitfield |=
11293                                                 ETH_TX_BD_FLAGS_IPV6;
11294
11295                 if (xmit_type & XMIT_CSUM_TCP) {
11296                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11297
11298                 } else {
11299                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11300
11301                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11302
11303                         DP(NETIF_MSG_TX_QUEUED,
11304                            "hlen %d  fix %d  csum before fix %x\n",
11305                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11306
11307                         /* HW bug: fixup the CSUM */
11308                         pbd->tcp_pseudo_csum =
11309                                 bnx2x_csum_fix(skb_transport_header(skb),
11310                                                SKB_CS(skb), fix);
11311
11312                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11313                            pbd->tcp_pseudo_csum);
11314                 }
11315         }
11316
11317         mapping = pci_map_single(bp->pdev, skb->data,
11318                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11319
11320         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11321         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11322         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11323         tx_start_bd->nbd = cpu_to_le16(nbd);
11324         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11325         pkt_size = tx_start_bd->nbytes;
11326
11327         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11328            "  nbytes %d  flags %x  vlan %x\n",
11329            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11330            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11331            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11332
11333         if (xmit_type & XMIT_GSO) {
11334
11335                 DP(NETIF_MSG_TX_QUEUED,
11336                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11337                    skb->len, hlen, skb_headlen(skb),
11338                    skb_shinfo(skb)->gso_size);
11339
11340                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11341
11342                 if (unlikely(skb_headlen(skb) > hlen))
11343                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11344                                                  hlen, bd_prod, ++nbd);
11345
11346                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11347                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11348                 pbd->tcp_flags = pbd_tcp_flags(skb);
11349
11350                 if (xmit_type & XMIT_GSO_V4) {
11351                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11352                         pbd->tcp_pseudo_csum =
11353                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11354                                                           ip_hdr(skb)->daddr,
11355                                                           0, IPPROTO_TCP, 0));
11356
11357                 } else
11358                         pbd->tcp_pseudo_csum =
11359                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11360                                                         &ipv6_hdr(skb)->daddr,
11361                                                         0, IPPROTO_TCP, 0));
11362
11363                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11364         }
11365         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11366
11367         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11368                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11369
11370                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11371                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11372                 if (total_pkt_bd == NULL)
11373                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11374
11375                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11376                                        frag->size, PCI_DMA_TODEVICE);
11377
11378                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11379                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11380                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11381                 le16_add_cpu(&pkt_size, frag->size);
11382
11383                 DP(NETIF_MSG_TX_QUEUED,
11384                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11385                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11386                    le16_to_cpu(tx_data_bd->nbytes));
11387         }
11388
11389         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11390
11391         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11392
11393         /* now send a tx doorbell, counting the next BD
11394          * if the packet contains or ends with it
11395          */
11396         if (TX_BD_POFF(bd_prod) < nbd)
11397                 nbd++;
11398
11399         if (total_pkt_bd != NULL)
11400                 total_pkt_bd->total_pkt_bytes = pkt_size;
11401
11402         if (pbd)
11403                 DP(NETIF_MSG_TX_QUEUED,
11404                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11405                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11406                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11407                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11408                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11409
11410         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11411
11412         /*
11413          * Make sure that the BD data is updated before updating the producer
11414          * since FW might read the BD right after the producer is updated.
11415          * This is only applicable for weak-ordered memory model archs such
11416          * as IA-64. The following barrier is also mandatory since the
11417          * FW assumes packets must have BDs.
11418          */
11419         wmb();
11420
11421         fp->tx_db.data.prod += nbd;
11422         barrier();
11423         DOORBELL(bp, fp->index, fp->tx_db.raw);
11424
11425         mmiowb();
11426
11427         fp->tx_bd_prod += nbd;
11428
11429         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11430                 netif_tx_stop_queue(txq);
11431
11432                 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
11433                  * ordering of set_bit() in netif_tx_stop_queue() and read of
11434                  * fp->tx_bd_cons */
11435                 smp_mb();
11436
11437                 fp->eth_q_stats.driver_xoff++;
11438                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11439                         netif_tx_wake_queue(txq);
11440         }
11441         fp->tx_pkt++;
11442
11443         return NETDEV_TX_OK;
11444 }
11445
11446 /* called with rtnl_lock */
11447 static int bnx2x_open(struct net_device *dev)
11448 {
11449         struct bnx2x *bp = netdev_priv(dev);
11450
11451         netif_carrier_off(dev);
11452
11453         bnx2x_set_power_state(bp, PCI_D0);
11454
11455         return bnx2x_nic_load(bp, LOAD_OPEN);
11456 }
11457
11458 /* called with rtnl_lock */
11459 static int bnx2x_close(struct net_device *dev)
11460 {
11461         struct bnx2x *bp = netdev_priv(dev);
11462
11463         /* Unload the driver, release IRQs */
11464         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11465         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11466                 if (!CHIP_REV_IS_SLOW(bp))
11467                         bnx2x_set_power_state(bp, PCI_D3hot);
11468
11469         return 0;
11470 }
11471
11472 /* called with netif_tx_lock from dev_mcast.c */
11473 static void bnx2x_set_rx_mode(struct net_device *dev)
11474 {
11475         struct bnx2x *bp = netdev_priv(dev);
11476         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11477         int port = BP_PORT(bp);
11478
11479         if (bp->state != BNX2X_STATE_OPEN) {
11480                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11481                 return;
11482         }
11483
11484         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11485
11486         if (dev->flags & IFF_PROMISC)
11487                 rx_mode = BNX2X_RX_MODE_PROMISC;
11488
11489         else if ((dev->flags & IFF_ALLMULTI) ||
11490                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11491                   CHIP_IS_E1(bp)))
11492                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11493
11494         else { /* some multicasts */
11495                 if (CHIP_IS_E1(bp)) {
11496                         int i, old, offset;
11497                         struct dev_mc_list *mclist;
11498                         struct mac_configuration_cmd *config =
11499                                                 bnx2x_sp(bp, mcast_config);
11500
11501                         i = 0;
11502                         netdev_for_each_mc_addr(mclist, dev) {
11503                                 config->config_table[i].
11504                                         cam_entry.msb_mac_addr =
11505                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11506                                 config->config_table[i].
11507                                         cam_entry.middle_mac_addr =
11508                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11509                                 config->config_table[i].
11510                                         cam_entry.lsb_mac_addr =
11511                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11512                                 config->config_table[i].cam_entry.flags =
11513                                                         cpu_to_le16(port);
11514                                 config->config_table[i].
11515                                         target_table_entry.flags = 0;
11516                                 config->config_table[i].target_table_entry.
11517                                         clients_bit_vector =
11518                                                 cpu_to_le32(1 << BP_L_ID(bp));
11519                                 config->config_table[i].
11520                                         target_table_entry.vlan_id = 0;
11521
11522                                 DP(NETIF_MSG_IFUP,
11523                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11524                                    config->config_table[i].
11525                                                 cam_entry.msb_mac_addr,
11526                                    config->config_table[i].
11527                                                 cam_entry.middle_mac_addr,
11528                                    config->config_table[i].
11529                                                 cam_entry.lsb_mac_addr);
11530                                 i++;
11531                         }
11532                         old = config->hdr.length;
11533                         if (old > i) {
11534                                 for (; i < old; i++) {
11535                                         if (CAM_IS_INVALID(config->
11536                                                            config_table[i])) {
11537                                                 /* already invalidated */
11538                                                 break;
11539                                         }
11540                                         /* invalidate */
11541                                         CAM_INVALIDATE(config->
11542                                                        config_table[i]);
11543                                 }
11544                         }
11545
11546                         if (CHIP_REV_IS_SLOW(bp))
11547                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11548                         else
11549                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11550
11551                         config->hdr.length = i;
11552                         config->hdr.offset = offset;
11553                         config->hdr.client_id = bp->fp->cl_id;
11554                         config->hdr.reserved1 = 0;
11555
11556                         bp->set_mac_pending++;
11557                         smp_wmb();
11558
11559                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11560                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11561                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11562                                       0);
11563                 } else { /* E1H */
11564                         /* Accept one or more multicasts */
11565                         struct dev_mc_list *mclist;
11566                         u32 mc_filter[MC_HASH_SIZE];
11567                         u32 crc, bit, regidx;
11568                         int i;
11569
11570                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11571
11572                         netdev_for_each_mc_addr(mclist, dev) {
11573                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11574                                    mclist->dmi_addr);
11575
11576                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11577                                 bit = (crc >> 24) & 0xff;
11578                                 regidx = bit >> 5;
11579                                 bit &= 0x1f;
11580                                 mc_filter[regidx] |= (1 << bit);
11581                         }
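                        /* Editor's note (illustrative): for crc32c(MAC) =
                         * 0xa1xxxxxx, bit = 0xa1, regidx = 0xa1 >> 5 = 5 and
                         * bit & 0x1f = 1, so bit 1 of mc_filter[5] is set.
                         */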
11582
11583                         for (i = 0; i < MC_HASH_SIZE; i++)
11584                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11585                                        mc_filter[i]);
11586                 }
11587         }
11588
11589         bp->rx_mode = rx_mode;
11590         bnx2x_set_storm_rx_mode(bp);
11591 }
11592
11593 /* called with rtnl_lock */
11594 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11595 {
11596         struct sockaddr *addr = p;
11597         struct bnx2x *bp = netdev_priv(dev);
11598
11599         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11600                 return -EINVAL;
11601
11602         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11603         if (netif_running(dev)) {
11604                 if (CHIP_IS_E1(bp))
11605                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11606                 else
11607                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11608         }
11609
11610         return 0;
11611 }
11612
11613 /* called with rtnl_lock */
11614 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11615                            int devad, u16 addr)
11616 {
11617         struct bnx2x *bp = netdev_priv(netdev);
11618         u16 value;
11619         int rc;
11620         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11621
11622         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11623            prtad, devad, addr);
11624
11625         if (prtad != bp->mdio.prtad) {
11626                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11627                    prtad, bp->mdio.prtad);
11628                 return -EINVAL;
11629         }
11630
11631         /* The HW expects different devad if CL22 is used */
11632         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11633
11634         bnx2x_acquire_phy_lock(bp);
11635         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11636                              devad, addr, &value);
11637         bnx2x_release_phy_lock(bp);
11638         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11639
11640         if (!rc)
11641                 rc = value;
11642         return rc;
11643 }
11644
11645 /* called with rtnl_lock */
11646 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11647                             u16 addr, u16 value)
11648 {
11649         struct bnx2x *bp = netdev_priv(netdev);
11650         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11651         int rc;
11652
11653         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11654                            " value 0x%x\n", prtad, devad, addr, value);
11655
11656         if (prtad != bp->mdio.prtad) {
11657                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11658                    prtad, bp->mdio.prtad);
11659                 return -EINVAL;
11660         }
11661
11662         /* The HW expects different devad if CL22 is used */
11663         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11664
11665         bnx2x_acquire_phy_lock(bp);
11666         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11667                               devad, addr, value);
11668         bnx2x_release_phy_lock(bp);
11669         return rc;
11670 }
11671
11672 /* called with rtnl_lock */
11673 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11674 {
11675         struct bnx2x *bp = netdev_priv(dev);
11676         struct mii_ioctl_data *mdio = if_mii(ifr);
11677
11678         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11679            mdio->phy_id, mdio->reg_num, mdio->val_in);
11680
11681         if (!netif_running(dev))
11682                 return -EAGAIN;
11683
11684         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11685 }
11686
11687 /* called with rtnl_lock */
11688 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11689 {
11690         struct bnx2x *bp = netdev_priv(dev);
11691         int rc = 0;
11692
11693         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11694             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11695                 return -EINVAL;
11696
11697         /* This does not race with packet allocation
11698          * because the actual alloc size is
11699          * only updated as part of load
11700          */
11701         dev->mtu = new_mtu;
11702
11703         if (netif_running(dev)) {
11704                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11705                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11706         }
11707
11708         return rc;
11709 }
11710
11711 static void bnx2x_tx_timeout(struct net_device *dev)
11712 {
11713         struct bnx2x *bp = netdev_priv(dev);
11714
11715 #ifdef BNX2X_STOP_ON_ERROR
11716         if (!bp->panic)
11717                 bnx2x_panic();
11718 #endif
11719         /* This allows the netif to be shut down gracefully before resetting */
11720         schedule_work(&bp->reset_task);
11721 }
11722
11723 #ifdef BCM_VLAN
11724 /* called with rtnl_lock */
11725 static void bnx2x_vlan_rx_register(struct net_device *dev,
11726                                    struct vlan_group *vlgrp)
11727 {
11728         struct bnx2x *bp = netdev_priv(dev);
11729
11730         bp->vlgrp = vlgrp;
11731
11732         /* Set flags according to the required capabilities */
11733         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11734
11735         if (dev->features & NETIF_F_HW_VLAN_TX)
11736                 bp->flags |= HW_VLAN_TX_FLAG;
11737
11738         if (dev->features & NETIF_F_HW_VLAN_RX)
11739                 bp->flags |= HW_VLAN_RX_FLAG;
11740
11741         if (netif_running(dev))
11742                 bnx2x_set_client_config(bp);
11743 }
11744
11745 #endif
11746
11747 #ifdef CONFIG_NET_POLL_CONTROLLER
11748 static void poll_bnx2x(struct net_device *dev)
11749 {
11750         struct bnx2x *bp = netdev_priv(dev);
11751
11752         disable_irq(bp->pdev->irq);
11753         bnx2x_interrupt(bp->pdev->irq, dev);
11754         enable_irq(bp->pdev->irq);
11755 }
11756 #endif
11757
11758 static const struct net_device_ops bnx2x_netdev_ops = {
11759         .ndo_open               = bnx2x_open,
11760         .ndo_stop               = bnx2x_close,
11761         .ndo_start_xmit         = bnx2x_start_xmit,
11762         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11763         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11764         .ndo_validate_addr      = eth_validate_addr,
11765         .ndo_do_ioctl           = bnx2x_ioctl,
11766         .ndo_change_mtu         = bnx2x_change_mtu,
11767         .ndo_tx_timeout         = bnx2x_tx_timeout,
11768 #ifdef BCM_VLAN
11769         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11770 #endif
11771 #ifdef CONFIG_NET_POLL_CONTROLLER
11772         .ndo_poll_controller    = poll_bnx2x,
11773 #endif
11774 };
11775
11776 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11777                                     struct net_device *dev)
11778 {
11779         struct bnx2x *bp;
11780         int rc;
11781
11782         SET_NETDEV_DEV(dev, &pdev->dev);
11783         bp = netdev_priv(dev);
11784
11785         bp->dev = dev;
11786         bp->pdev = pdev;
11787         bp->flags = 0;
11788         bp->func = PCI_FUNC(pdev->devfn);
11789
11790         rc = pci_enable_device(pdev);
11791         if (rc) {
11792                 pr_err("Cannot enable PCI device, aborting\n");
11793                 goto err_out;
11794         }
11795
11796         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11797                 pr_err("Cannot find PCI device base address, aborting\n");
11798                 rc = -ENODEV;
11799                 goto err_out_disable;
11800         }
11801
11802         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11803                 pr_err("Cannot find second PCI device base address, aborting\n");
11804                 rc = -ENODEV;
11805                 goto err_out_disable;
11806         }
11807
11808         if (atomic_read(&pdev->enable_cnt) == 1) {
11809                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11810                 if (rc) {
11811                         pr_err("Cannot obtain PCI resources, aborting\n");
11812                         goto err_out_disable;
11813                 }
11814
11815                 pci_set_master(pdev);
11816                 pci_save_state(pdev);
11817         }
11818
11819         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11820         if (bp->pm_cap == 0) {
11821                 pr_err("Cannot find power management capability, aborting\n");
11822                 rc = -EIO;
11823                 goto err_out_release;
11824         }
11825
11826         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11827         if (bp->pcie_cap == 0) {
11828                 pr_err("Cannot find PCI Express capability, aborting\n");
11829                 rc = -EIO;
11830                 goto err_out_release;
11831         }
11832
11833         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11834                 bp->flags |= USING_DAC_FLAG;
11835                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11836                         pr_err("pci_set_consistent_dma_mask failed, aborting\n");
11837                         rc = -EIO;
11838                         goto err_out_release;
11839                 }
11840
11841         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11842                 pr_err("System does not support DMA, aborting\n");
11843                 rc = -EIO;
11844                 goto err_out_release;
11845         }
11846
11847         dev->mem_start = pci_resource_start(pdev, 0);
11848         dev->base_addr = dev->mem_start;
11849         dev->mem_end = pci_resource_end(pdev, 0);
11850
11851         dev->irq = pdev->irq;
11852
11853         bp->regview = pci_ioremap_bar(pdev, 0);
11854         if (!bp->regview) {
11855                 pr_err("Cannot map register space, aborting\n");
11856                 rc = -ENOMEM;
11857                 goto err_out_release;
11858         }
11859
11860         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11861                                         min_t(u64, BNX2X_DB_SIZE,
11862                                               pci_resource_len(pdev, 2)));
11863         if (!bp->doorbells) {
11864                 pr_err("Cannot map doorbell space, aborting\n");
11865                 rc = -ENOMEM;
11866                 goto err_out_unmap;
11867         }
11868
11869         bnx2x_set_power_state(bp, PCI_D0);
11870
11871         /* clean indirect addresses */
11872         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11873                                PCICFG_VENDOR_ID_OFFSET);
11874         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11875         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11876         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11877         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11878
11879         dev->watchdog_timeo = TX_TIMEOUT;
11880
11881         dev->netdev_ops = &bnx2x_netdev_ops;
11882         dev->ethtool_ops = &bnx2x_ethtool_ops;
11883         dev->features |= NETIF_F_SG;
11884         dev->features |= NETIF_F_HW_CSUM;
11885         if (bp->flags & USING_DAC_FLAG)
11886                 dev->features |= NETIF_F_HIGHDMA;
11887         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11888         dev->features |= NETIF_F_TSO6;
11889 #ifdef BCM_VLAN
11890         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11891         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11892
11893         dev->vlan_features |= NETIF_F_SG;
11894         dev->vlan_features |= NETIF_F_HW_CSUM;
11895         if (bp->flags & USING_DAC_FLAG)
11896                 dev->vlan_features |= NETIF_F_HIGHDMA;
11897         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11898         dev->vlan_features |= NETIF_F_TSO6;
11899 #endif
11900
11901         /* get_port_hwinfo() will set prtad and mmds properly */
11902         bp->mdio.prtad = MDIO_PRTAD_NONE;
11903         bp->mdio.mmds = 0;
11904         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11905         bp->mdio.dev = dev;
11906         bp->mdio.mdio_read = bnx2x_mdio_read;
11907         bp->mdio.mdio_write = bnx2x_mdio_write;
11908
11909         return 0;
11910
11911 err_out_unmap:
11912         if (bp->regview) {
11913                 iounmap(bp->regview);
11914                 bp->regview = NULL;
11915         }
11916         if (bp->doorbells) {
11917                 iounmap(bp->doorbells);
11918                 bp->doorbells = NULL;
11919         }
11920
11921 err_out_release:
11922         if (atomic_read(&pdev->enable_cnt) == 1)
11923                 pci_release_regions(pdev);
11924
11925 err_out_disable:
11926         pci_disable_device(pdev);
11927         pci_set_drvdata(pdev, NULL);
11928
11929 err_out:
11930         return rc;
11931 }
11932
11933 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11934                                                  int *width, int *speed)
11935 {
11936         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11937
11938         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11939
11940         /* returned speed value: 1 = 2.5GHz, 2 = 5GHz */
11941         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11942 }
11943
11944 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11945 {
11946         const struct firmware *firmware = bp->firmware;
11947         struct bnx2x_fw_file_hdr *fw_hdr;
11948         struct bnx2x_fw_file_section *sections;
11949         u32 offset, len, num_ops;
11950         u16 *ops_offsets;
11951         int i;
11952         const u8 *fw_ver;
11953
11954         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11955                 return -EINVAL;
11956
11957         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11958         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11959
11960         /* Make sure none of the offsets and sizes make us read beyond
11961          * the end of the firmware data */
11962         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11963                 offset = be32_to_cpu(sections[i].offset);
11964                 len = be32_to_cpu(sections[i].len);
11965                 if (offset + len > firmware->size) {
11966                         pr_err("Section %d length is out of bounds\n", i);
11967                         return -EINVAL;
11968                 }
11969         }
11970
11971         /* Likewise for the init_ops offsets */
11972         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11973         ops_offsets = (u16 *)(firmware->data + offset);
11974         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11975
11976         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11977                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11978                         pr_err("Section offset %d is out of bounds\n", i);
11979                         return -EINVAL;
11980                 }
11981         }
11982
11983         /* Check FW version */
11984         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11985         fw_ver = firmware->data + offset;
11986         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11987             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11988             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11989             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11990                 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11991                        fw_ver[0], fw_ver[1], fw_ver[2],
11992                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11993                        BCM_5710_FW_MINOR_VERSION,
11994                        BCM_5710_FW_REVISION_VERSION,
11995                        BCM_5710_FW_ENGINEERING_VERSION);
11996                 return -EINVAL;
11997         }
11998
11999         return 0;
12000 }
12001
12002 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12003 {
12004         const __be32 *source = (const __be32 *)_source;
12005         u32 *target = (u32 *)_target;
12006         u32 i;
12007
12008         for (i = 0; i < n/4; i++)
12009                 target[i] = be32_to_cpu(source[i]);
12010 }
12011
12012 /*
12013    Ops array is stored in the following format:
12014    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12015  */
12016 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12017 {
12018         const __be32 *source = (const __be32 *)_source;
12019         struct raw_op *target = (struct raw_op *)_target;
12020         u32 i, j, tmp;
12021
12022         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12023                 tmp = be32_to_cpu(source[j]);
12024                 target[i].op = (tmp >> 24) & 0xff;
12025                 target[i].offset =  tmp & 0xffffff;
12026                 target[i].raw_data = be32_to_cpu(source[j+1]);
12027         }
12028 }
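
/* Editor's note: a worked example of the unpacking above.  For the
 * big-endian pair {0x5b123456, 0xdeadbeef}, the first word yields
 * op = 0x5b and offset = 0x123456, and the second becomes
 * raw_data = 0xdeadbeef after be32_to_cpu().
 */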
12029
12030 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12031 {
12032         const __be16 *source = (const __be16 *)_source;
12033         u16 *target = (u16 *)_target;
12034         u32 i;
12035
12036         for (i = 0; i < n/2; i++)
12037                 target[i] = be16_to_cpu(source[i]);
12038 }
12039
12040 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
12041 do {                                                                    \
12042         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
12043         bp->arr = kmalloc(len, GFP_KERNEL);                             \
12044         if (!bp->arr) {                                                 \
12045                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
12046                 goto lbl;                                               \
12047         }                                                               \
12048         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
12049              (u8 *)bp->arr, len);                                       \
12050 } while (0)
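
/* Editor's note: the macro expects 'bp' and 'fw_hdr' in scope; e.g.
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * allocates bp->init_data, byte-swaps the blob into it and jumps to the
 * given label if the allocation fails.
 */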
12051
12052 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12053 {
12054         const char *fw_file_name;
12055         struct bnx2x_fw_file_hdr *fw_hdr;
12056         int rc;
12057
12058         if (CHIP_IS_E1(bp))
12059                 fw_file_name = FW_FILE_NAME_E1;
12060         else
12061                 fw_file_name = FW_FILE_NAME_E1H;
12062
12063         pr_info("Loading %s\n", fw_file_name);
12064
12065         rc = request_firmware(&bp->firmware, fw_file_name, dev);
12066         if (rc) {
12067                 pr_err("Can't load firmware file %s\n", fw_file_name);
12068                 goto request_firmware_exit;
12069         }
12070
12071         rc = bnx2x_check_firmware(bp);
12072         if (rc) {
12073                 pr_err("Corrupt firmware file %s\n", fw_file_name);
12074                 goto request_firmware_exit;
12075         }
12076
12077         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12078
12079         /* Initialize the pointers to the init arrays */
12080         /* Blob */
12081         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12082
12083         /* Opcodes */
12084         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12085
12086         /* Offsets */
12087         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12088                             be16_to_cpu_n);
12089
12090         /* STORMs firmware */
12091         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12092                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12093         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12094                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12095         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12096                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12097         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12098                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12099         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12100                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12101         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12102                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12103         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12104                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12105         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12106                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12107
12108         return 0;
12109
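      /* Error unwind: each label frees what was allocated before the
       * point of failure, in reverse order of allocation.
       */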
12110 init_offsets_alloc_err:
12111         kfree(bp->init_ops);
12112 init_ops_alloc_err:
12113         kfree(bp->init_data);
12114 request_firmware_exit:
12115         release_firmware(bp->firmware);
12116
12117         return rc;
12118 }
12119
12120
12121 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12122                                     const struct pci_device_id *ent)
12123 {
12124         struct net_device *dev = NULL;
12125         struct bnx2x *bp;
12126         int pcie_width, pcie_speed;
12127         int rc;
12128
12129         /* dev zeroed in alloc_etherdev_mq() */
12130         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12131         if (!dev) {
12132                 pr_err("Cannot allocate net device\n");
12133                 return -ENOMEM;
12134         }
12135
12136         bp = netdev_priv(dev);
12137         bp->msg_enable = debug;
12138
12139         pci_set_drvdata(pdev, dev);
12140
12141         rc = bnx2x_init_dev(pdev, dev);
12142         if (rc < 0) {
12143                 free_netdev(dev);
12144                 return rc;
12145         }
12146
12147         rc = bnx2x_init_bp(bp);
12148         if (rc)
12149                 goto init_one_exit;
12150
12151         /* Set init arrays */
12152         rc = bnx2x_init_firmware(bp, &pdev->dev);
12153         if (rc) {
12154                 pr_err("Error loading firmware\n");
12155                 goto init_one_exit;
12156         }
12157
12158         rc = register_netdev(dev);
12159         if (rc) {
12160                 dev_err(&pdev->dev, "Cannot register net device\n");
12161                 goto init_one_exit;
12162         }
12163
12164         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12165         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12166                     board_info[ent->driver_data].name,
12167                     (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12168                     pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12169                     dev->base_addr, bp->pdev->irq, dev->dev_addr);
12170
12171         return 0;
12172
12173 init_one_exit:
12174         if (bp->regview)
12175                 iounmap(bp->regview);
12176
12177         if (bp->doorbells)
12178                 iounmap(bp->doorbells);
12179
12180         free_netdev(dev);
12181
12182         if (atomic_read(&pdev->enable_cnt) == 1)
12183                 pci_release_regions(pdev);
12184
12185         pci_disable_device(pdev);
12186         pci_set_drvdata(pdev, NULL);
12187
12188         return rc;
12189 }
12190
12191 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12192 {
12193         struct net_device *dev = pci_get_drvdata(pdev);
12194         struct bnx2x *bp;
12195
12196         if (!dev) {
12197                 pr_err("BAD net device from bnx2x_init_one\n");
12198                 return;
12199         }
12200         bp = netdev_priv(dev);
12201
12202         unregister_netdev(dev);
12203
12204         kfree(bp->init_ops_offsets);
12205         kfree(bp->init_ops);
12206         kfree(bp->init_data);
12207         release_firmware(bp->firmware);
12208
12209         if (bp->regview)
12210                 iounmap(bp->regview);
12211
12212         if (bp->doorbells)
12213                 iounmap(bp->doorbells);
12214
12215         free_netdev(dev);
12216
12217         if (atomic_read(&pdev->enable_cnt) == 1)
12218                 pci_release_regions(pdev);
12219
12220         pci_disable_device(pdev);
12221         pci_set_drvdata(pdev, NULL);
12222 }
12223
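      /* Legacy PCI PM hooks: suspend saves config space and, if the
       * device is up, detaches it, unloads the NIC and enters the
       * requested D-state; resume restores D0 and config space, then
       * reloads the NIC with LOAD_OPEN.
       */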
12224 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12225 {
12226         struct net_device *dev = pci_get_drvdata(pdev);
12227         struct bnx2x *bp;
12228
12229         if (!dev) {
12230                 pr_err("BAD net device from bnx2x_init_one\n");
12231                 return -ENODEV;
12232         }
12233         bp = netdev_priv(dev);
12234
12235         rtnl_lock();
12236
12237         pci_save_state(pdev);
12238
12239         if (!netif_running(dev)) {
12240                 rtnl_unlock();
12241                 return 0;
12242         }
12243
12244         netif_device_detach(dev);
12245
12246         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12247
12248         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12249
12250         rtnl_unlock();
12251
12252         return 0;
12253 }
12254
12255 static int bnx2x_resume(struct pci_dev *pdev)
12256 {
12257         struct net_device *dev = pci_get_drvdata(pdev);
12258         struct bnx2x *bp;
12259         int rc;
12260
12261         if (!dev) {
12262                 pr_err("BAD net device from bnx2x_init_one\n");
12263                 return -ENODEV;
12264         }
12265         bp = netdev_priv(dev);
12266
12267         rtnl_lock();
12268
12269         pci_restore_state(pdev);
12270
12271         if (!netif_running(dev)) {
12272                 rtnl_unlock();
12273                 return 0;
12274         }
12275
12276         bnx2x_set_power_state(bp, PCI_D0);
12277         netif_device_attach(dev);
12278
12279         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12280
12281         rtnl_unlock();
12282
12283         return rc;
12284 }
12285
12286 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12287 {
12288         int i;
12289
12290         bp->state = BNX2X_STATE_ERROR;
12291
12292         bp->rx_mode = BNX2X_RX_MODE_NONE;
12293
12294         bnx2x_netif_stop(bp, 0);
12295
12296         del_timer_sync(&bp->timer);
12297         bp->stats_state = STATS_STATE_DISABLED;
12298         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12299
12300         /* Release IRQs */
12301         bnx2x_free_irq(bp, false);
12302
12303         if (CHIP_IS_E1(bp)) {
12304                 struct mac_configuration_cmd *config =
12305                                                 bnx2x_sp(bp, mcast_config);
12306
12307                 for (i = 0; i < config->hdr.length; i++)
12308                         CAM_INVALIDATE(config->config_table[i]);
12309         }
12310
12311         /* Free SKBs, SGEs, TPA pool and driver internals */
12312         bnx2x_free_skbs(bp);
12313         for_each_queue(bp, i)
12314                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12315         for_each_queue(bp, i)
12316                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12317         bnx2x_free_mem(bp);
12318
12319         bp->state = BNX2X_STATE_CLOSED;
12320
12321         netif_carrier_off(bp->dev);
12322
12323         return 0;
12324 }
12325
12326 static void bnx2x_eeh_recover(struct bnx2x *bp)
12327 {
12328         u32 val;
12329
12330         mutex_init(&bp->port.phy_mutex);
12331
12332         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12333         bp->link_params.shmem_base = bp->common.shmem_base;
12334         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12335
12336         if (!bp->common.shmem_base ||
12337             (bp->common.shmem_base < 0xA0000) ||
12338             (bp->common.shmem_base >= 0xC0000)) {
12339                 BNX2X_DEV_INFO("MCP not active\n");
12340                 bp->flags |= NO_MCP_FLAG;
12341                 return;
12342         }
12343
12344         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12345         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12346                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12347                 BNX2X_ERR("BAD MCP validity signature\n");
12348
12349         if (!BP_NOMCP(bp)) {
12350                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12351                               & DRV_MSG_SEQ_NUMBER_MASK);
12352                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12353         }
12354 }
12355
12356 /**
12357  * bnx2x_io_error_detected - called when a PCI error is detected
12358  * @pdev: Pointer to PCI device
12359  * @state: The current PCI connection state
12360  *
12361  * This function is called after a PCI bus error affecting
12362  * this device has been detected.
12363  */
12364 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12365                                                 pci_channel_state_t state)
12366 {
12367         struct net_device *dev = pci_get_drvdata(pdev);
12368         struct bnx2x *bp = netdev_priv(dev);
12369
12370         rtnl_lock();
12371
12372         netif_device_detach(dev);
12373
12374         if (state == pci_channel_io_perm_failure) {
12375                 rtnl_unlock();
12376                 return PCI_ERS_RESULT_DISCONNECT;
12377         }
12378
12379         if (netif_running(dev))
12380                 bnx2x_eeh_nic_unload(bp);
12381
12382         pci_disable_device(pdev);
12383
12384         rtnl_unlock();
12385
12386         /* Request a slot reset */
12387         return PCI_ERS_RESULT_NEED_RESET;
12388 }
12389
12390 /**
12391  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12392  * @pdev: Pointer to PCI device
12393  *
12394  * Restart the card from scratch, as if from a cold-boot.
12395  */
12396 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12397 {
12398         struct net_device *dev = pci_get_drvdata(pdev);
12399         struct bnx2x *bp = netdev_priv(dev);
12400
12401         rtnl_lock();
12402
12403         if (pci_enable_device(pdev)) {
12404                 dev_err(&pdev->dev,
12405                         "Cannot re-enable PCI device after reset\n");
12406                 rtnl_unlock();
12407                 return PCI_ERS_RESULT_DISCONNECT;
12408         }
12409
12410         pci_set_master(pdev);
12411         pci_restore_state(pdev);
12412
12413         if (netif_running(dev))
12414                 bnx2x_set_power_state(bp, PCI_D0);
12415
12416         rtnl_unlock();
12417
12418         return PCI_ERS_RESULT_RECOVERED;
12419 }
12420
12421 /**
12422  * bnx2x_io_resume - called when traffic can start flowing again
12423  * @pdev: Pointer to PCI device
12424  *
12425  * This callback is called when the error recovery driver tells us that
12426  * it's OK to resume normal operation.
12427  */
12428 static void bnx2x_io_resume(struct pci_dev *pdev)
12429 {
12430         struct net_device *dev = pci_get_drvdata(pdev);
12431         struct bnx2x *bp = netdev_priv(dev);
12432
12433         rtnl_lock();
12434
12435         bnx2x_eeh_recover(bp);
12436
12437         if (netif_running(dev))
12438                 bnx2x_nic_load(bp, LOAD_NORMAL);
12439
12440         netif_device_attach(dev);
12441
12442         rtnl_unlock();
12443 }
12444
12445 static struct pci_error_handlers bnx2x_err_handler = {
12446         .error_detected = bnx2x_io_error_detected,
12447         .slot_reset     = bnx2x_io_slot_reset,
12448         .resume         = bnx2x_io_resume,
12449 };
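      /* EEH recovery sequence: error_detected() detaches the net device
       * and asks for a slot reset, slot_reset() re-enables the function
       * and restores its config space, and resume() re-reads shared
       * memory state before reloading the NIC and re-attaching the
       * device.
       */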
12450
12451 static struct pci_driver bnx2x_pci_driver = {
12452         .name        = DRV_MODULE_NAME,
12453         .id_table    = bnx2x_pci_tbl,
12454         .probe       = bnx2x_init_one,
12455         .remove      = __devexit_p(bnx2x_remove_one),
12456         .suspend     = bnx2x_suspend,
12457         .resume      = bnx2x_resume,
12458         .err_handler = &bnx2x_err_handler,
12459 };
12460
12461 static int __init bnx2x_init(void)
12462 {
12463         int ret;
12464
12465         pr_info("%s", version);
12466
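              /* single-threaded workqueue: queued work items are
               * serialized and never run concurrently
               */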
12467         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12468         if (bnx2x_wq == NULL) {
12469                 pr_err("Cannot create workqueue\n");
12470                 return -ENOMEM;
12471         }
12472
12473         ret = pci_register_driver(&bnx2x_pci_driver);
12474         if (ret) {
12475                 pr_err("Cannot register driver\n");
12476                 destroy_workqueue(bnx2x_wq);
12477         }
12478         return ret;
12479 }
12480
12481 static void __exit bnx2x_cleanup(void)
12482 {
12483         pci_unregister_driver(&bnx2x_pci_driver);
12484
12485         destroy_workqueue(bnx2x_wq);
12486 }
12487
12488 module_init(bnx2x_init);
12489 module_exit(bnx2x_cleanup);
12490
12491 #ifdef BCM_CNIC
12492
12493 /* count denotes the number of new completions we have seen */
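      /* Each reported completion frees a slot on the slow-path queue;
       * while fewer than max_kwqe_pending entries are outstanding,
       * queued CNIC kwqes are copied onto the SPQ under spq_lock and
       * the producer index is updated.
       */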
12494 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12495 {
12496         struct eth_spe *spe;
12497
12498 #ifdef BNX2X_STOP_ON_ERROR
12499         if (unlikely(bp->panic))
12500                 return;
12501 #endif
12502
12503         spin_lock_bh(&bp->spq_lock);
12504         bp->cnic_spq_pending -= count;
12505
12506         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12507              bp->cnic_spq_pending++) {
12508
12509                 if (!bp->cnic_kwq_pending)
12510                         break;
12511
12512                 spe = bnx2x_sp_get_next(bp);
12513                 *spe = *bp->cnic_kwq_cons;
12514
12515                 bp->cnic_kwq_pending--;
12516
12517                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12518                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12519
12520                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12521                         bp->cnic_kwq_cons = bp->cnic_kwq;
12522                 else
12523                         bp->cnic_kwq_cons++;
12524         }
12525         bnx2x_sp_prod_update(bp);
12526         spin_unlock_bh(&bp->spq_lock);
12527 }
12528
12529 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12530                                struct kwqe_16 *kwqes[], u32 count)
12531 {
12532         struct bnx2x *bp = netdev_priv(dev);
12533         int i;
12534
12535 #ifdef BNX2X_STOP_ON_ERROR
12536         if (unlikely(bp->panic))
12537                 return -EIO;
12538 #endif
12539
12540         spin_lock_bh(&bp->spq_lock);
12541
12542         for (i = 0; i < count; i++) {
12543                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12544
12545                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12546                         break;
12547
12548                 *bp->cnic_kwq_prod = *spe;
12549
12550                 bp->cnic_kwq_pending++;
12551
12552                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12553                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
12554                    spe->data.mac_config_addr.hi,
12555                    spe->data.mac_config_addr.lo,
12556                    bp->cnic_kwq_pending);
12557
12558                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12559                         bp->cnic_kwq_prod = bp->cnic_kwq;
12560                 else
12561                         bp->cnic_kwq_prod++;
12562         }
12563
12564         spin_unlock_bh(&bp->spq_lock);
12565
12566         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12567                 bnx2x_cnic_sp_post(bp, 0);
12568
12569         return i;
12570 }
12571
12572 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12573 {
12574         struct cnic_ops *c_ops;
12575         int rc = 0;
12576
12577         mutex_lock(&bp->cnic_mutex);
12578         c_ops = bp->cnic_ops;
12579         if (c_ops)
12580                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12581         mutex_unlock(&bp->cnic_mutex);
12582
12583         return rc;
12584 }
12585
12586 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12587 {
12588         struct cnic_ops *c_ops;
12589         int rc = 0;
12590
12591         rcu_read_lock();
12592         c_ops = rcu_dereference(bp->cnic_ops);
12593         if (c_ops)
12594                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12595         rcu_read_unlock();
12596
12597         return rc;
12598 }
12599
12600 /*
12601  * for commands that have no data
12602  */
12603 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12604 {
12605         struct cnic_ctl_info ctl = {0};
12606
12607         ctl.cmd = cmd;
12608
12609         return bnx2x_cnic_ctl_send(bp, &ctl);
12610 }
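      /* e.g. bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD) before the NIC
       * goes down
       */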
12611
12612 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12613 {
12614         struct cnic_ctl_info ctl;
12615
12616         /* first we tell CNIC and only then we count this as a completion */
12617         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12618         ctl.data.comp.cid = cid;
12619
12620         bnx2x_cnic_ctl_send_bh(bp, &ctl);
12621         bnx2x_cnic_sp_post(bp, 1);
12622 }
12623
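      /* Control entry point handed to the cnic driver through
       * cnic_eth_dev->drv_ctl (set up in bnx2x_cnic_probe() below).
       */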
12624 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12625 {
12626         struct bnx2x *bp = netdev_priv(dev);
12627         int rc = 0;
12628
12629         switch (ctl->cmd) {
12630         case DRV_CTL_CTXTBL_WR_CMD: {
12631                 u32 index = ctl->data.io.offset;
12632                 dma_addr_t addr = ctl->data.io.dma_addr;
12633
12634                 bnx2x_ilt_wr(bp, index, addr);
12635                 break;
12636         }
12637
12638         case DRV_CTL_COMPLETION_CMD: {
12639                 int count = ctl->data.comp.comp_count;
12640
12641                 bnx2x_cnic_sp_post(bp, count);
12642                 break;
12643         }
12644
12645         /* rtnl_lock is held.  */
12646         case DRV_CTL_START_L2_CMD: {
12647                 u32 cli = ctl->data.ring.client_id;
12648
12649                 bp->rx_mode_cl_mask |= (1 << cli);
12650                 bnx2x_set_storm_rx_mode(bp);
12651                 break;
12652         }
12653
12654         /* rtnl_lock is held.  */
12655         case DRV_CTL_STOP_L2_CMD: {
12656                 u32 cli = ctl->data.ring.client_id;
12657
12658                 bp->rx_mode_cl_mask &= ~(1 << cli);
12659                 bnx2x_set_storm_rx_mode(bp);
12660                 break;
12661         }
12662
12663         default:
12664                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12665                 rc = -EINVAL;
12666         }
12667
12668         return rc;
12669 }
12670
12671 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12672 {
12673         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12674
12675         if (bp->flags & USING_MSIX_FLAG) {
12676                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12677                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12678                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12679         } else {
12680                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12681                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12682         }
12683         cp->irq_arr[0].status_blk = bp->cnic_sb;
12684         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12685         cp->irq_arr[1].status_blk = bp->def_status_blk;
12686         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12687
12688         cp->num_irq = 2;
12689 }
12690
12691 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12692                                void *data)
12693 {
12694         struct bnx2x *bp = netdev_priv(dev);
12695         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12696
12697         if (ops == NULL)
12698                 return -EINVAL;
12699
12700         if (atomic_read(&bp->intr_sem) != 0)
12701                 return -EBUSY;
12702
12703         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12704         if (!bp->cnic_kwq)
12705                 return -ENOMEM;
12706
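              /* cnic_kwq is a page-sized circular buffer of eth_spe
               * entries; prod and cons wrap back to the base on
               * reaching cnic_kwq_last
               */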
12707         bp->cnic_kwq_cons = bp->cnic_kwq;
12708         bp->cnic_kwq_prod = bp->cnic_kwq;
12709         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12710
12711         bp->cnic_spq_pending = 0;
12712         bp->cnic_kwq_pending = 0;
12713
12714         bp->cnic_data = data;
12715
12716         cp->num_irq = 0;
12717         cp->drv_state = CNIC_DRV_STATE_REGD;
12718
12719         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12720
12721         bnx2x_setup_cnic_irq_info(bp);
12722         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12723         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12724         rcu_assign_pointer(bp->cnic_ops, ops);
12725
12726         return 0;
12727 }
12728
12729 static int bnx2x_unregister_cnic(struct net_device *dev)
12730 {
12731         struct bnx2x *bp = netdev_priv(dev);
12732         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12733
12734         mutex_lock(&bp->cnic_mutex);
12735         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12736                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12737                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12738         }
12739         cp->drv_state = 0;
12740         rcu_assign_pointer(bp->cnic_ops, NULL);
12741         mutex_unlock(&bp->cnic_mutex);
12742         synchronize_rcu();
12743         kfree(bp->cnic_kwq);
12744         bp->cnic_kwq = NULL;
12745
12746         return 0;
12747 }
12748
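      /* Entry point for the cnic module: returns this device's
       * cnic_eth_dev, populated with chip parameters and the driver
       * callbacks that cnic invokes for iSCSI offload.
       */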
12749 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12750 {
12751         struct bnx2x *bp = netdev_priv(dev);
12752         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12753
12754         cp->drv_owner = THIS_MODULE;
12755         cp->chip_id = CHIP_ID(bp);
12756         cp->pdev = bp->pdev;
12757         cp->io_base = bp->regview;
12758         cp->io_base2 = bp->doorbells;
12759         cp->max_kwqe_pending = 8;
12760         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12761         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12762         cp->ctx_tbl_len = CNIC_ILT_LINES;
12763         cp->starting_cid = BCM_CNIC_CID_START;
12764         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12765         cp->drv_ctl = bnx2x_drv_ctl;
12766         cp->drv_register_cnic = bnx2x_register_cnic;
12767         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12768
12769         return cp;
12770 }
12771 EXPORT_SYMBOL(bnx2x_cnic_probe);
12772
12773 #endif /* BCM_CNIC */
12774