/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-5"
#define DRV_MODULE_RELDATE      "2009/11/09"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

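/* the indirect-read counterpart of bnx2x_reg_wr_ind(): read a GRC register
 * through the PCI config window; also used only at init, locking by the MCP
 */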
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

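/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr;
 * falls back to indirect writes while the DMAE block is not yet ready
 */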
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

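/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; falls back to indirect reads while the DMAE block is not ready
 */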
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

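/* write a buffer of arbitrary length to the device, splitting it into
 * DMAE_LEN32_WR_MAX-dword chunks
 */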
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

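/* walk the per-STORM (XSTORM/TSTORM/CSTORM/USTORM) assert lists, print
 * every valid entry and return the number of firmware asserts found
 */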
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

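/* dump the MCP scratchpad firmware trace to the log; the trace buffer is
 * cyclic, so print from the saved mark to the end and then wrap around
 */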
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

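/* enable HC (host coalescing) interrupt generation for this port according
 * to the active interrupt mode (MSI-X, MSI or INTx)
 */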
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

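/* reclaim completed Tx packets up to the status-block consumer and wake
 * the Tx queue if it was stopped and enough room is available again
 */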
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb;
 * we are just moving one from cons to prod.
 * We are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence they will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

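/* start a TPA aggregation: the empty skb parked in the per-queue bin
 * replaces the consumer's buffer at the producer slot, while the partially
 * received skb is kept in the bin until bnx2x_tpa_stop()
 */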
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that the "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
1352                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1353                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1354
1355                 /* Add one frag and update the appropriate fields in the skb */
1356                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1357
1358                 skb->data_len += frag_len;
1359                 skb->truesize += frag_len;
1360                 skb->len += frag_len;
1361
1362                 frag_size -= frag_len;
1363         }
1364
1365         return 0;
1366 }
1367
1368 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1369                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1370                            u16 cqe_idx)
1371 {
1372         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1373         struct sk_buff *skb = rx_buf->skb;
1374         /* alloc new skb */
1375         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1376
1377         /* Unmap skb in the pool anyway, as we are going to change
1378            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1379            fails. */
1380         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1381                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1382
1383         if (likely(new_skb)) {
1384                 /* fix ip xsum and give it to the stack */
1385                 /* (no need to map the new skb) */
1386 #ifdef BCM_VLAN
1387                 int is_vlan_cqe =
1388                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1389                          PARSING_FLAGS_VLAN);
1390                 int is_not_hwaccel_vlan_cqe =
1391                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1392 #endif
1393
1394                 prefetch(skb);
1395                 prefetch(((char *)(skb)) + 128);
1396
1397 #ifdef BNX2X_STOP_ON_ERROR
1398                 if (pad + len > bp->rx_buf_size) {
1399                         BNX2X_ERR("skb_put is about to fail...  "
1400                                   "pad %d  len %d  rx_buf_size %d\n",
1401                                   pad, len, bp->rx_buf_size);
1402                         bnx2x_panic();
1403                         return;
1404                 }
1405 #endif
1406
1407                 skb_reserve(skb, pad);
1408                 skb_put(skb, len);
1409
1410                 skb->protocol = eth_type_trans(skb, bp->dev);
1411                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1412
1413                 {
1414                         struct iphdr *iph;
1415
1416                         iph = (struct iphdr *)skb->data;
1417 #ifdef BCM_VLAN
1418                         /* If there is no Rx VLAN offloading -
1419                            take VLAN tag into an account */
1420                         if (unlikely(is_not_hwaccel_vlan_cqe))
1421                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1422 #endif
1423                         iph->check = 0;
1424                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1425                 }
1426
1427                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1428                                          &cqe->fast_path_cqe, cqe_idx)) {
1429 #ifdef BCM_VLAN
1430                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1431                             (!is_not_hwaccel_vlan_cqe))
1432                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1433                                                 le16_to_cpu(cqe->fast_path_cqe.
1434                                                             vlan_tag));
1435                         else
1436 #endif
1437                                 netif_receive_skb(skb);
1438                 } else {
1439                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1440                            " - dropping packet!\n");
1441                         dev_kfree_skb(skb);
1442                 }
1443
1444
1445                 /* put new skb in bin */
1446                 fp->tpa_pool[queue].skb = new_skb;
1447
1448         } else {
1449                 /* else drop the packet and keep the buffer in the bin */
1450                 DP(NETIF_MSG_RX_STATUS,
1451                    "Failed to allocate new skb - dropping packet!\n");
1452                 fp->eth_q_stats.rx_skb_alloc_failed++;
1453         }
1454
1455         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1456 }
1457
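/* Publish new RX producer values to the FW: the BD, CQE and SGE
 * producers are written, one dword at a time, into the
 * ustorm_eth_rx_producers area of USTORM internal memory for this
 * port/client pair.
 */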
1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459                                         struct bnx2x_fastpath *fp,
1460                                         u16 bd_prod, u16 rx_comp_prod,
1461                                         u16 rx_sge_prod)
1462 {
1463         struct ustorm_eth_rx_producers rx_prods = {0};
1464         int i;
1465
1466         /* Update producers */
1467         rx_prods.bd_prod = bd_prod;
1468         rx_prods.cqe_prod = rx_comp_prod;
1469         rx_prods.sge_prod = rx_sge_prod;
1470
1471         /*
1472          * Make sure that the BD and SGE data is updated before updating the
1473          * producers since FW might read the BD/SGE right after the producer
1474          * is updated.
1475          * This is only applicable for weak-ordered memory model archs such
1476          * as IA-64. The following barrier is also mandatory since the FW
1477          * assumes BDs must have buffers.
1478          */
1479         wmb();
1480
1481         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482                 REG_WR(bp, BAR_USTRORM_INTMEM +
1483                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484                        ((u32 *)&rx_prods)[i]);
1485
1486         mmiowb(); /* keep prod updates ordered */
1487
1488         DP(NETIF_MSG_RX_STATUS,
1489            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1490            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1491 }
1492
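/* Main RX completion handler, called from NAPI poll.  Walks the RCQ
 * from the driver's consumer index up to the HW completion index and
 * dispatches each CQE as a slowpath event, a TPA start/stop, or a
 * regular packet, stopping after at most 'budget' packets.  The updated
 * producer values are then written back so the FW can refill.
 */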
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494 {
1495         struct bnx2x *bp = fp->bp;
1496         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498         int rx_pkt = 0;
1499
1500 #ifdef BNX2X_STOP_ON_ERROR
1501         if (unlikely(bp->panic))
1502                 return 0;
1503 #endif
1504
1505         /* The CQ "next element" is the same size as a regular element,
1506            which is why it is safe to treat it as one here */
1507         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509                 hw_comp_cons++;
1510
1511         bd_cons = fp->rx_bd_cons;
1512         bd_prod = fp->rx_bd_prod;
1513         bd_prod_fw = bd_prod;
1514         sw_comp_cons = fp->rx_comp_cons;
1515         sw_comp_prod = fp->rx_comp_prod;
1516
1517         /* Memory barrier necessary as speculative reads of the rx
1518          * buffer can be ahead of the index in the status block
1519          */
1520         rmb();
1521
1522         DP(NETIF_MSG_RX_STATUS,
1523            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1524            fp->index, hw_comp_cons, sw_comp_cons);
1525
1526         while (sw_comp_cons != hw_comp_cons) {
1527                 struct sw_rx_bd *rx_buf = NULL;
1528                 struct sk_buff *skb;
1529                 union eth_rx_cqe *cqe;
1530                 u8 cqe_fp_flags;
1531                 u16 len, pad;
1532
1533                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534                 bd_prod = RX_BD(bd_prod);
1535                 bd_cons = RX_BD(bd_cons);
1536
1537                 /* Prefetch the page containing the BD descriptor
1538                    at the producer's index. It will be needed when a new
1539                    skb is allocated */
1540                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541                                              (&fp->rx_desc_ring[bd_prod])) -
1542                                   PAGE_SIZE + 1));
1543
1544                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1546
1547                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1548                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1549                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1553
1554                 /* is this a slowpath msg? */
1555                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556                         bnx2x_sp_event(fp, cqe);
1557                         goto next_cqe;
1558
1559                 /* this is an rx packet */
1560                 } else {
1561                         rx_buf = &fp->rx_buf_ring[bd_cons];
1562                         skb = rx_buf->skb;
1563                         prefetch(skb);
1564                         prefetch((u8 *)skb + 256);
1565                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566                         pad = cqe->fast_path_cqe.placement_offset;
1567
1568                         /* If CQE is marked both TPA_START and TPA_END
1569                            it is a non-TPA CQE */
1570                         if ((!fp->disable_tpa) &&
1571                             (TPA_TYPE(cqe_fp_flags) !=
1572                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1573                                 u16 queue = cqe->fast_path_cqe.queue_index;
1574
1575                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576                                         DP(NETIF_MSG_RX_STATUS,
1577                                            "calling tpa_start on queue %d\n",
1578                                            queue);
1579
1580                                         bnx2x_tpa_start(fp, queue, skb,
1581                                                         bd_cons, bd_prod);
1582                                         goto next_rx;
1583                                 }
1584
1585                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586                                         DP(NETIF_MSG_RX_STATUS,
1587                                            "calling tpa_stop on queue %d\n",
1588                                            queue);
1589
1590                                         if (!BNX2X_RX_SUM_FIX(cqe))
1591                                                 BNX2X_ERR("STOP on non-TCP "
1592                                                           "data\n");
1593
1594                                         /* This is the size of the linear data
1595                                            on this skb */
1596                                         len = le16_to_cpu(cqe->fast_path_cqe.
1597                                                                 len_on_bd);
1598                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1599                                                     len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601                                         if (bp->panic)
1602                                                 return 0;
1603 #endif
1604
1605                                         bnx2x_update_sge_prod(fp,
1606                                                         &cqe->fast_path_cqe);
1607                                         goto next_cqe;
1608                                 }
1609                         }
1610
1611                         pci_dma_sync_single_for_device(bp->pdev,
1612                                         pci_unmap_addr(rx_buf, mapping),
1613                                                        pad + RX_COPY_THRESH,
1614                                                        PCI_DMA_FROMDEVICE);
1615                         prefetch(skb);
1616                         prefetch(((char *)(skb)) + 128);
1617
1618                         /* is this an error packet? */
1619                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620                                 DP(NETIF_MSG_RX_ERR,
1621                                    "ERROR  flags %x  rx packet %u\n",
1622                                    cqe_fp_flags, sw_comp_cons);
1623                                 fp->eth_q_stats.rx_err_discard_pkt++;
1624                                 goto reuse_rx;
1625                         }
1626
1627                         /* Since we don't have a jumbo ring,
1628                          * copy small packets if mtu > 1500
1629                          */
1630                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631                             (len <= RX_COPY_THRESH)) {
1632                                 struct sk_buff *new_skb;
1633
1634                                 new_skb = netdev_alloc_skb(bp->dev,
1635                                                            len + pad);
1636                                 if (new_skb == NULL) {
1637                                         DP(NETIF_MSG_RX_ERR,
1638                                            "ERROR  packet dropped "
1639                                            "because of alloc failure\n");
1640                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1641                                         goto reuse_rx;
1642                                 }
1643
1644                                 /* aligned copy */
1645                                 skb_copy_from_linear_data_offset(skb, pad,
1646                                                     new_skb->data + pad, len);
1647                                 skb_reserve(new_skb, pad);
1648                                 skb_put(new_skb, len);
1649
1650                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652                                 skb = new_skb;
1653
1654                         } else
1655                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656                                 pci_unmap_single(bp->pdev,
1657                                         pci_unmap_addr(rx_buf, mapping),
1658                                                  bp->rx_buf_size,
1659                                                  PCI_DMA_FROMDEVICE);
1660                                 skb_reserve(skb, pad);
1661                                 skb_put(skb, len);
1662
1663                         } else {
1664                                 DP(NETIF_MSG_RX_ERR,
1665                                    "ERROR  packet dropped because "
1666                                    "of alloc failure\n");
1667                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670                                 goto next_rx;
1671                         }
1672
1673                         skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675                         skb->ip_summed = CHECKSUM_NONE;
1676                         if (bp->rx_csum) {
1677                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1679                                 else
1680                                         fp->eth_q_stats.hw_csum_err++;
1681                         }
1682                 }
1683
1684                 skb_record_rx_queue(skb, fp->index);
1685
1686 #ifdef BCM_VLAN
1687                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689                      PARSING_FLAGS_VLAN))
1690                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692                 else
1693 #endif
1694                         netif_receive_skb(skb);
1695
1696
1697 next_rx:
1698                 rx_buf->skb = NULL;
1699
1700                 bd_cons = NEXT_RX_IDX(bd_cons);
1701                 bd_prod = NEXT_RX_IDX(bd_prod);
1702                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703                 rx_pkt++;
1704 next_cqe:
1705                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1707
1708                 if (rx_pkt == budget)
1709                         break;
1710         } /* while */
1711
1712         fp->rx_bd_cons = bd_cons;
1713         fp->rx_bd_prod = bd_prod_fw;
1714         fp->rx_comp_cons = sw_comp_cons;
1715         fp->rx_comp_prod = sw_comp_prod;
1716
1717         /* Update producers */
1718         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719                              fp->rx_sge_prod);
1720
1721         fp->rx_pkt += rx_pkt;
1722         fp->rx_calls++;
1723
1724         return rx_pkt;
1725 }
1726
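/* MSI-X handler for a single fastpath vector: acknowledge the status
 * block with further IGU interrupts disabled and defer the actual RX/TX
 * processing to NAPI.
 */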
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728 {
1729         struct bnx2x_fastpath *fp = fp_cookie;
1730         struct bnx2x *bp = fp->bp;
1731
1732         /* Return here if interrupt is disabled */
1733         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735                 return IRQ_HANDLED;
1736         }
1737
1738         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739            fp->index, fp->sb_id);
1740         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1741
1742 #ifdef BNX2X_STOP_ON_ERROR
1743         if (unlikely(bp->panic))
1744                 return IRQ_HANDLED;
1745 #endif
1746
1747         /* Handle Rx and Tx according to MSI-X vector */
1748         prefetch(fp->rx_cons_sb);
1749         prefetch(fp->tx_cons_sb);
1750         prefetch(&fp->status_blk->u_status_block.status_block_index);
1751         prefetch(&fp->status_blk->c_status_block.status_block_index);
1752         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1753
1754         return IRQ_HANDLED;
1755 }
1756
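/* Common ISR for INTx/MSI.  bnx2x_ack_int() returns a status bitmask:
 * bit 0 indicates a slowpath event (handled via the sp_task delayed
 * work) and bit (sb_id + 1) indicates work on the corresponding
 * fastpath status block, which is deferred to NAPI.
 */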
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758 {
1759         struct bnx2x *bp = netdev_priv(dev_instance);
1760         u16 status = bnx2x_ack_int(bp);
1761         u16 mask;
1762         int i;
1763
1764         /* Return here if interrupt is shared and it's not for us */
1765         if (unlikely(status == 0)) {
1766                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767                 return IRQ_NONE;
1768         }
1769         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1770
1771         /* Return here if interrupt is disabled */
1772         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774                 return IRQ_HANDLED;
1775         }
1776
1777 #ifdef BNX2X_STOP_ON_ERROR
1778         if (unlikely(bp->panic))
1779                 return IRQ_HANDLED;
1780 #endif
1781
1782         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783                 struct bnx2x_fastpath *fp = &bp->fp[i];
1784
1785                 mask = 0x2 << fp->sb_id;
1786                 if (status & mask) {
1787                         /* Handle Rx and Tx according to SB id */
1788                         prefetch(fp->rx_cons_sb);
1789                         prefetch(&fp->status_blk->u_status_block.
1790                                                 status_block_index);
1791                         prefetch(fp->tx_cons_sb);
1792                         prefetch(&fp->status_blk->c_status_block.
1793                                                 status_block_index);
1794                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795                         status &= ~mask;
1796                 }
1797         }
1798
1799 #ifdef BCM_CNIC
1800         mask = 0x2 << CNIC_SB_ID(bp);
1801         if (status & (mask | 0x1)) {
1802                 struct cnic_ops *c_ops = NULL;
1803
1804                 rcu_read_lock();
1805                 c_ops = rcu_dereference(bp->cnic_ops);
1806                 if (c_ops)
1807                         c_ops->cnic_handler(bp->cnic_data, NULL);
1808                 rcu_read_unlock();
1809
1810                 status &= ~mask;
1811         }
1812 #endif
1813
1814         if (unlikely(status & 0x1)) {
1815                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1816
1817                 status &= ~0x1;
1818                 if (!status)
1819                         return IRQ_HANDLED;
1820         }
1821
1822         if (status)
1823                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824                    status);
1825
1826         return IRQ_HANDLED;
1827 }
1828
1829 /* end of fast path */
1830
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1832
1833 /* Link */
1834
1835 /*
1836  * General service functions
1837  */
1838
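/* Acquire one of the driver/HW resource locks: the resource bit is
 * written to the lock's set-register (control_reg + 4) and the request
 * is confirmed by reading it back, retrying every 5ms for up to 5
 * seconds.  Callers pair this with bnx2x_release_hw_lock(), e.g. as the
 * GPIO helpers below do:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... access the shared registers ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */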
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1840 {
1841         u32 lock_status;
1842         u32 resource_bit = (1 << resource);
1843         int func = BP_FUNC(bp);
1844         u32 hw_lock_control_reg;
1845         int cnt;
1846
1847         /* Validating that the resource is within range */
1848         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849                 DP(NETIF_MSG_HW,
1850                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852                 return -EINVAL;
1853         }
1854
1855         if (func <= 5) {
1856                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857         } else {
1858                 hw_lock_control_reg =
1859                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860         }
1861
1862         /* Validating that the resource is not already taken */
1863         lock_status = REG_RD(bp, hw_lock_control_reg);
1864         if (lock_status & resource_bit) {
1865                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1866                    lock_status, resource_bit);
1867                 return -EEXIST;
1868         }
1869
1870         /* Try for 5 seconds, polling every 5ms */
1871         for (cnt = 0; cnt < 1000; cnt++) {
1872                 /* Try to acquire the lock */
1873                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874                 lock_status = REG_RD(bp, hw_lock_control_reg);
1875                 if (lock_status & resource_bit)
1876                         return 0;
1877
1878                 msleep(5);
1879         }
1880         DP(NETIF_MSG_HW, "Timeout\n");
1881         return -EAGAIN;
1882 }
1883
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1885 {
1886         u32 lock_status;
1887         u32 resource_bit = (1 << resource);
1888         int func = BP_FUNC(bp);
1889         u32 hw_lock_control_reg;
1890
1891         /* Validating that the resource is within range */
1892         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893                 DP(NETIF_MSG_HW,
1894                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896                 return -EINVAL;
1897         }
1898
1899         if (func <= 5) {
1900                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901         } else {
1902                 hw_lock_control_reg =
1903                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904         }
1905
1906         /* Validating that the resource is currently taken */
1907         lock_status = REG_RD(bp, hw_lock_control_reg);
1908         if (!(lock_status & resource_bit)) {
1909                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1910                    lock_status, resource_bit);
1911                 return -EFAULT;
1912         }
1913
1914         REG_WR(bp, hw_lock_control_reg, resource_bit);
1915         return 0;
1916 }
1917
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1920 {
1921         mutex_lock(&bp->port.phy_mutex);
1922
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925 }
1926
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1928 {
1929         if (bp->port.need_hw_lock)
1930                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1931
1932         mutex_unlock(&bp->port.phy_mutex);
1933 }
1934
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944         int value;
1945
1946         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948                 return -EINVAL;
1949         }
1950
1951         /* read GPIO value */
1952         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954         /* get the requested pin value */
1955         if ((gpio_reg & gpio_mask) == gpio_mask)
1956                 value = 1;
1957         else
1958                 value = 0;
1959
1960         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1961
1962         return value;
1963 }
1964
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1966 {
1967         /* The GPIO should be swapped if swap register is set and active */
1968         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970         int gpio_shift = gpio_num +
1971                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972         u32 gpio_mask = (1 << gpio_shift);
1973         u32 gpio_reg;
1974
1975         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977                 return -EINVAL;
1978         }
1979
1980         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981         /* read GPIO and mask except the float bits */
1982         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984         switch (mode) {
1985         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987                    gpio_num, gpio_shift);
1988                 /* clear FLOAT and set CLR */
1989                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991                 break;
1992
1993         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995                    gpio_num, gpio_shift);
1996                 /* clear FLOAT and set SET */
1997                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999                 break;
2000
2001         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003                    gpio_num, gpio_shift);
2004                 /* set FLOAT */
2005                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006                 break;
2007
2008         default:
2009                 break;
2010         }
2011
2012         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015         return 0;
2016 }
2017
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019 {
2020         /* The GPIO should be swapped if swap register is set and active */
2021         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023         int gpio_shift = gpio_num +
2024                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025         u32 gpio_mask = (1 << gpio_shift);
2026         u32 gpio_reg;
2027
2028         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030                 return -EINVAL;
2031         }
2032
2033         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034         /* read GPIO int */
2035         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037         switch (mode) {
2038         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040                                    "output low\n", gpio_num, gpio_shift);
2041                 /* clear SET and set CLR */
2042                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044                 break;
2045
2046         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048                                    "output high\n", gpio_num, gpio_shift);
2049                 /* clear CLR and set SET */
2050                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052                 break;
2053
2054         default:
2055                 break;
2056         }
2057
2058         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061         return 0;
2062 }
2063
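/* Unlike the GPIO helpers above, SPIO pins take no port argument and no
 * port-swap handling is done, so they appear to be common to the whole
 * device; only SPIO 4-7 are settable here.
 */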
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065 {
2066         u32 spio_mask = (1 << spio_num);
2067         u32 spio_reg;
2068
2069         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070             (spio_num > MISC_REGISTERS_SPIO_7)) {
2071                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072                 return -EINVAL;
2073         }
2074
2075         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076         /* read SPIO and mask except the float bits */
2077         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079         switch (mode) {
2080         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082                 /* clear FLOAT and set CLR */
2083                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085                 break;
2086
2087         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089                 /* clear FLOAT and set SET */
2090                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092                 break;
2093
2094         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096                 /* set FLOAT */
2097                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098                 break;
2099
2100         default:
2101                 break;
2102         }
2103
2104         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2106
2107         return 0;
2108 }
2109
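/* Translate the negotiated IEEE pause mode from link_vars into ethtool
 * advertising bits: BOTH sets Pause|Asym_Pause, ASYMMETRIC sets only
 * Asym_Pause, and NONE (or any other value) clears both.
 */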
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2111 {
2112         switch (bp->link_vars.ieee_fc &
2113                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116                                           ADVERTISED_Pause);
2117                 break;
2118
2119         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121                                          ADVERTISED_Pause);
2122                 break;
2123
2124         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126                 break;
2127
2128         default:
2129                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130                                           ADVERTISED_Pause);
2131                 break;
2132         }
2133 }
2134
2135 static void bnx2x_link_report(struct bnx2x *bp)
2136 {
2137         if (bp->flags & MF_FUNC_DIS) {
2138                 netif_carrier_off(bp->dev);
2139                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140                 return;
2141         }
2142
2143         if (bp->link_vars.link_up) {
2144                 u16 line_speed;
2145
2146                 if (bp->state == BNX2X_STATE_OPEN)
2147                         netif_carrier_on(bp->dev);
2148                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2149
2150                 line_speed = bp->link_vars.line_speed;
2151                 if (IS_E1HMF(bp)) {
2152                         u16 vn_max_rate;
2153
2154                         vn_max_rate =
2155                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157                         if (vn_max_rate < line_speed)
2158                                 line_speed = vn_max_rate;
2159                 }
2160                 printk("%d Mbps ", line_speed);
2161
2162                 if (bp->link_vars.duplex == DUPLEX_FULL)
2163                         printk("full duplex");
2164                 else
2165                         printk("half duplex");
2166
2167                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169                                 printk(", receive ");
2170                                 if (bp->link_vars.flow_ctrl &
2171                                     BNX2X_FLOW_CTRL_TX)
2172                                         printk("& transmit ");
2173                         } else {
2174                                 printk(", transmit ");
2175                         }
2176                         printk("flow control ON");
2177                 }
2178                 printk("\n");
2179
2180         } else { /* link_down */
2181                 netif_carrier_off(bp->dev);
2182                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2183         }
2184 }
2185
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2187 {
2188         if (!BP_NOMCP(bp)) {
2189                 u8 rc;
2190
2191                 /* Initialize link parameters structure variables */
2192                 /* It is recommended to turn off RX FC for jumbo frames
2193                    for better performance */
2194                 if (bp->dev->mtu > 5000)
2195                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196                 else
2197                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2198
2199                 bnx2x_acquire_phy_lock(bp);
2200
2201                 if (load_mode == LOAD_DIAG)
2202                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
2204                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2205
2206                 bnx2x_release_phy_lock(bp);
2207
2208                 bnx2x_calc_fc_adv(bp);
2209
2210                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212                         bnx2x_link_report(bp);
2213                 }
2214
2215                 return rc;
2216         }
2217         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2218         return -EINVAL;
2219 }
2220
2221 static void bnx2x_link_set(struct bnx2x *bp)
2222 {
2223         if (!BP_NOMCP(bp)) {
2224                 bnx2x_acquire_phy_lock(bp);
2225                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226                 bnx2x_release_phy_lock(bp);
2227
2228                 bnx2x_calc_fc_adv(bp);
2229         } else
2230                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2231 }
2232
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238                 bnx2x_release_phy_lock(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2241 }
2242
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2244 {
2245         u8 rc;
2246
2247         bnx2x_acquire_phy_lock(bp);
2248         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249         bnx2x_release_phy_lock(bp);
2250
2251         return rc;
2252 }
2253
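/* Set up the per-port rate-shaping and fairness timer parameters.
 * r_param is the line rate in bytes/usec (line_speed is in Mbps, i.e.
 * bits/usec, hence the division by 8); the thresholds below are in
 * bytes and the timeouts are converted to 4-usec SDM ticks.
 */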
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2255 {
2256         u32 r_param = bp->link_vars.line_speed / 8;
2257         u32 fair_periodic_timeout_usec;
2258         u32 t_fair;
2259
2260         memset(&(bp->cmng.rs_vars), 0,
2261                sizeof(struct rate_shaping_vars_per_port));
2262         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2263
2264         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2266
2267         /* this is the threshold below which no timer arming will occur;
2268            the 1.25 coefficient makes the threshold a little bigger than
2269            the real time, to compensate for timer inaccuracy */
2270         bp->cmng.rs_vars.rs_threshold =
2271                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
2273         /* resolution of fairness timer */
2274         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2277
2278         /* this is the threshold below which we won't arm the timer anymore */
2279         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2280
2281         /* we multiply by 1e3/8 to get bytes/msec.
2282            We don't want the credits to exceed
2283            t_fair*FAIR_MEM (the algorithm resolution) */
2284         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285         /* since each tick is 4 usec */
2286         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2287 }
2288
2289 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2290    It's needed for further normalizing of the min_rates.
2291    The result is:
2292      the sum of vn_min_rates,
2293        or
2294      0 - if all the min_rates are 0.
2295      In the latter case the fairness algorithm should be deactivated.
2296      If not all min_rates are zero then those that are zeroes will be set to 1.
2297  */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299 {
2300         int all_zero = 1;
2301         int port = BP_PORT(bp);
2302         int vn;
2303
2304         bp->vn_weight_sum = 0;
2305         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306                 int func = 2*vn + port;
2307                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311                 /* Skip hidden vns */
2312                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313                         continue;
2314
2315                 /* If min rate is zero - set it to 1 */
2316                 if (!vn_min_rate)
2317                         vn_min_rate = DEF_MIN_RATE;
2318                 else
2319                         all_zero = 0;
2320
2321                 bp->vn_weight_sum += vn_min_rate;
2322         }
2323
2324         /* ... only if all min rates are zeros - disable fairness */
2325         if (all_zero) {
2326                 bp->cmng.flags.cmng_enables &=
2327                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2329                    " fairness will be disabled\n");
2330         } else
2331                 bp->cmng.flags.cmng_enables |=
2332                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2333 }
2334
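/* Program the rate-shaping and fairness context of a single VN
 * (function) into XSTORM internal memory.  The quota is the number of
 * bytes the VN may send per rate-shaping period
 * (max_rate[Mbps] * period[usec] / 8) and the fairness credit delta is
 * derived from the VN's min rate relative to vn_weight_sum.
 */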
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2336 {
2337         struct rate_shaping_vars_per_vn m_rs_vn;
2338         struct fairness_vars_per_vn m_fair_vn;
2339         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340         u16 vn_min_rate, vn_max_rate;
2341         int i;
2342
2343         /* If function is hidden - set min and max to zeroes */
2344         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345                 vn_min_rate = 0;
2346                 vn_max_rate = 0;
2347
2348         } else {
2349                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351                 /* If min rate is zero - set it to 1 */
2352                 if (!vn_min_rate)
2353                         vn_min_rate = DEF_MIN_RATE;
2354                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356         }
2357         DP(NETIF_MSG_IFUP,
2358            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2359            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2360
2361         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364         /* global vn counter - maximal Mbps for this vn */
2365         m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367         /* quota - number of bytes transmitted in this period */
2368         m_rs_vn.vn_counter.quota =
2369                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
2371         if (bp->vn_weight_sum) {
2372                 /* credit for each period of the fairness algorithm:
2373                    number of bytes in T_FAIR (the VNs share the port rate).
2374                    vn_weight_sum should not be larger than 10000, thus
2375                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376                    than zero */
2377                 m_fair_vn.vn_credit_delta =
2378                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2379                                                  (8 * bp->vn_weight_sum))),
2380                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382                    m_fair_vn.vn_credit_delta);
2383         }
2384
2385         /* Store it to internal memory */
2386         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389                        ((u32 *)(&m_rs_vn))[i]);
2390
2391         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394                        ((u32 *)(&m_fair_vn))[i]);
2395 }
2396
2397
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2400 {
2401         /* Make sure that we are synced with the current statistics */
2402         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
2404         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2405
2406         if (bp->link_vars.link_up) {
2407
2408                 /* dropless flow control */
2409                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410                         int port = BP_PORT(bp);
2411                         u32 pause_enabled = 0;
2412
2413                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414                                 pause_enabled = 1;
2415
2416                         REG_WR(bp, BAR_USTRORM_INTMEM +
2417                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418                                pause_enabled);
2419                 }
2420
2421                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422                         struct host_port_stats *pstats;
2423
2424                         pstats = bnx2x_sp(bp, port_stats);
2425                         /* reset old bmac stats */
2426                         memset(&(pstats->mac_stx[0]), 0,
2427                                sizeof(struct mac_stx));
2428                 }
2429                 if (bp->state == BNX2X_STATE_OPEN)
2430                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431         }
2432
2433         /* indicate link status */
2434         bnx2x_link_report(bp);
2435
2436         if (IS_E1HMF(bp)) {
2437                 int port = BP_PORT(bp);
2438                 int func;
2439                 int vn;
2440
2441                 /* Set the attention towards other drivers on the same port */
2442                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443                         if (vn == BP_E1HVN(bp))
2444                                 continue;
2445
2446                         func = ((vn << 1) | port);
2447                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449                 }
2450
2451                 if (bp->link_vars.link_up) {
2452                         int i;
2453
2454                         /* Init rate shaping and fairness contexts */
2455                         bnx2x_init_port_minmax(bp);
2456
2457                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460                         /* Store it to internal memory */
2461                         for (i = 0;
2462                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465                                        ((u32 *)(&bp->cmng))[i]);
2466                 }
2467         }
2468 }
2469
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2471 {
2472         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473                 return;
2474
2475         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
2477         if (bp->link_vars.link_up)
2478                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479         else
2480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2482         bnx2x_calc_vn_weight_sum(bp);
2483
2484         /* indicate link status */
2485         bnx2x_link_report(bp);
2486 }
2487
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2489 {
2490         int port = BP_PORT(bp);
2491         u32 val;
2492
2493         bp->port.pmf = 1;
2494         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496         /* enable nig attention */
2497         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2500
2501         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2502 }
2503
2504 /* end of Link */
2505
2506 /* slow path */
2507
2508 /*
2509  * General service functions
2510  */
2511
2512 /* send the MCP a request, block until there is a reply */
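/* The handshake is sequence-number based: the command is written to the
 * driver mailbox together with an incremented sequence number, then the
 * FW mailbox header is polled (for up to ~5s) until the FW echoes that
 * sequence number back.  The reply is returned with the sequence bits
 * masked off; 0 indicates that the FW failed to respond.
 */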
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514 {
2515         int func = BP_FUNC(bp);
2516         u32 seq = ++bp->fw_seq;
2517         u32 rc = 0;
2518         u32 cnt = 1;
2519         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
2521         mutex_lock(&bp->fw_mb_mutex);
2522         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525         do {
2526                 /* let the FW do its magic ... */
2527                 msleep(delay);
2528
2529                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
2531                 /* Give the FW up to 5 seconds (500*10ms) */
2532         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2533
2534         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535            cnt*delay, rc, seq);
2536
2537         /* is this a reply to our command? */
2538         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539                 rc &= FW_MSG_CODE_MASK;
2540         else {
2541                 /* FW BUG! */
2542                 BNX2X_ERR("FW failed to respond!\n");
2543                 bnx2x_fw_dump(bp);
2544                 rc = 0;
2545         }
2546         mutex_unlock(&bp->fw_mb_mutex);
2547
2548         return rc;
2549 }
2550
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558
2559         netif_tx_disable(bp->dev);
2560
2561         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2563         netif_carrier_off(bp->dev);
2564 }
2565
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2567 {
2568         int port = BP_PORT(bp);
2569
2570         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2572         /* Tx queues should only be re-enabled */
2573         netif_tx_wake_all_queues(bp->dev);
2574
2575         /*
2576          * Do not call netif_carrier_on here; it will be called when the
2577          * link state is checked and the link is found to be up
2578          */
2579 }
2580
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2582 {
2583         int port = BP_PORT(bp);
2584         int vn, i;
2585
2586         /* Init rate shaping and fairness contexts */
2587         bnx2x_init_port_minmax(bp);
2588
2589         bnx2x_calc_vn_weight_sum(bp);
2590
2591         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594         if (bp->port.pmf) {
2595                 int func;
2596
2597                 /* Set the attention towards other drivers on the same port */
2598                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599                         if (vn == BP_E1HVN(bp))
2600                                 continue;
2601
2602                         func = ((vn << 1) | port);
2603                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605                 }
2606
2607                 /* Store it to internal memory */
2608                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2610                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611                                ((u32 *)(&bp->cmng))[i]);
2612         }
2613 }
2614
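/* Handle a DCC event signalled through the MCP: the function may be
 * disabled or re-enabled on the fly (MF_FUNC_DIS plus the Tx queue
 * state), and/or the min/max bandwidth allocation may be reprogrammed.
 * The outcome is reported back to the MCP as DCC_OK or DCC_FAILURE.
 */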
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616 {
2617         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2618
2619         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
2621                 /*
2622                  * This is the only place besides the function initialization
2623                  * where the bp->flags can change so it is done without any
2624                  * locks
2625                  */
2626                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628                         bp->flags |= MF_FUNC_DIS;
2629
2630                         bnx2x_e1h_disable(bp);
2631                 } else {
2632                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633                         bp->flags &= ~MF_FUNC_DIS;
2634
2635                         bnx2x_e1h_enable(bp);
2636                 }
2637                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638         }
2639         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641                 bnx2x_update_min_max(bp);
2642                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643         }
2644
2645         /* Report results to MCP */
2646         if (dcc_event)
2647                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648         else
2649                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650 }
2651
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654 {
2655         struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657         if (bp->spq_prod_bd == bp->spq_last_bd) {
2658                 bp->spq_prod_bd = bp->spq;
2659                 bp->spq_prod_idx = 0;
2660                 DP(NETIF_MSG_TIMER, "end of spq\n");
2661         } else {
2662                 bp->spq_prod_bd++;
2663                 bp->spq_prod_idx++;
2664         }
2665         return next_spe;
2666 }
2667
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670 {
2671         int func = BP_FUNC(bp);
2672
2673         /* Make sure that BD data is updated before writing the producer */
2674         wmb();
2675
2676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677                bp->spq_prod_idx);
2678         mmiowb();
2679 }
2680
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
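/* Post a single SPQ element under spq_lock: the command and HW CID are
 * encoded into the SPE header (with the common-ramrod flag when 'common'
 * is set), the data hi/lo words are filled in, and the SPQ producer is
 * advanced and written out to XSTORM internal memory.
 */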
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683                          u32 data_hi, u32 data_lo, int common)
2684 {
2685         struct eth_spe *spe;
2686
2687         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2689            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693 #ifdef BNX2X_STOP_ON_ERROR
2694         if (unlikely(bp->panic))
2695                 return -EIO;
2696 #endif
2697
2698         spin_lock_bh(&bp->spq_lock);
2699
2700         if (!bp->spq_left) {
2701                 BNX2X_ERR("BUG! SPQ ring full!\n");
2702                 spin_unlock_bh(&bp->spq_lock);
2703                 bnx2x_panic();
2704                 return -EBUSY;
2705         }
2706
2707         spe = bnx2x_sp_get_next(bp);
2708
2709         /* The CID needs the port number encoded in it */
2710         spe->hdr.conn_and_cmd_data =
2711                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712                                      HW_CID(bp, cid)));
2713         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2714         if (common)
2715                 spe->hdr.type |=
2716                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
2718         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2720
2721         bp->spq_left--;
2722
2723         bnx2x_sp_prod_update(bp);
2724         spin_unlock_bh(&bp->spq_lock);
2725         return 0;
2726 }
2727
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2730 {
2731         u32 i, j, val;
2732         int rc = 0;
2733
2734         might_sleep();
2735         i = 100;
2736         for (j = 0; j < i*10; j++) {
2737                 val = (1UL << 31);
2738                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740                 if (val & (1L << 31))
2741                         break;
2742
2743                 msleep(5);
2744         }
2745         if (!(val & (1L << 31))) {
2746                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2747                 rc = -EBUSY;
2748         }
2749
2750         return rc;
2751 }
2752
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2755 {
2756         u32 val = 0;
2757
2758         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759 }
2760
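/* Compare the cached default status block indices against the ones last
 * written by the chip and update any that changed.  Returns a bitmask of
 * what changed: 1 = attention bits index, 2/4/8/16 = the C/U/X/T storm
 * index respectively.
 */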
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762 {
2763         struct host_def_status_block *def_sb = bp->def_status_blk;
2764         u16 rc = 0;
2765
2766         barrier(); /* status block is written to by the chip */
2767         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769                 rc |= 1;
2770         }
2771         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773                 rc |= 2;
2774         }
2775         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777                 rc |= 4;
2778         }
2779         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781                 rc |= 8;
2782         }
2783         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785                 rc |= 16;
2786         }
2787         return rc;
2788 }
2789
2790 /*
2791  * slow path service functions
2792  */
2793
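/* Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port attention HW lock), record them in attn_state, service the
 * hard-wired sources (NIG/link, GPIOs, general attentions) and finally
 * write the bits to the HC to acknowledge them.  The NIG interrupt mask
 * is kept cleared while bnx2x_link_attn() runs and restored afterwards.
 */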
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795 {
2796         int port = BP_PORT(bp);
2797         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798                        COMMAND_REG_ATTN_BITS_SET);
2799         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802                                        NIG_REG_MASK_INTERRUPT_PORT0;
2803         u32 aeu_mask;
2804         u32 nig_mask = 0;
2805
2806         if (bp->attn_state & asserted)
2807                 BNX2X_ERR("IGU ERROR\n");
2808
2809         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810         aeu_mask = REG_RD(bp, aeu_addr);
2811
2812         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2813            aeu_mask, asserted);
2814         aeu_mask &= ~(asserted & 0xff);
2815         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2816
2817         REG_WR(bp, aeu_addr, aeu_mask);
2818         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819
2820         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821         bp->attn_state |= asserted;
2822         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2823
2824         if (asserted & ATTN_HARD_WIRED_MASK) {
2825                 if (asserted & ATTN_NIG_FOR_FUNC) {
2826
2827                         bnx2x_acquire_phy_lock(bp);
2828
2829                         /* save nig interrupt mask */
2830                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2831                         REG_WR(bp, nig_int_mask_addr, 0);
2832
2833                         bnx2x_link_attn(bp);
2834
2835                         /* handle unicore attn? */
2836                 }
2837                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840                 if (asserted & GPIO_2_FUNC)
2841                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843                 if (asserted & GPIO_3_FUNC)
2844                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846                 if (asserted & GPIO_4_FUNC)
2847                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849                 if (port == 0) {
2850                         if (asserted & ATTN_GENERAL_ATTN_1) {
2851                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853                         }
2854                         if (asserted & ATTN_GENERAL_ATTN_2) {
2855                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857                         }
2858                         if (asserted & ATTN_GENERAL_ATTN_3) {
2859                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861                         }
2862                 } else {
2863                         if (asserted & ATTN_GENERAL_ATTN_4) {
2864                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866                         }
2867                         if (asserted & ATTN_GENERAL_ATTN_5) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_6) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874                         }
2875                 }
2876
2877         } /* if hardwired */
2878
2879         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880            asserted, hc_addr);
2881         REG_WR(bp, hc_addr, asserted);
2882
2883         /* now set back the mask */
2884         if (asserted & ATTN_NIG_FOR_FUNC) {
2885                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886                 bnx2x_release_phy_lock(bp);
2887         }
2888 }
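
/*
 * Assertion-path summary: the newly raised bits are first masked in the
 * per-port AEU (under the HW lock) and recorded in bp->attn_state, the
 * hardwired sources (NIG/link, software timer, GPIOs, general
 * attentions) are serviced, and the set is finally forwarded to the IGU
 * through the HC ATTN_BITS_SET command register.  The NIG interrupt
 * mask is restored only once the link attention has been handled under
 * the PHY lock.
 */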
2889
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891 {
2892         int port = BP_PORT(bp);
2893
2894         /* mark the failure */
2895         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898                  bp->link_params.ext_phy_config);
2899
2900         /* log the failure */
2901         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902                " the driver to shut down the card to prevent permanent"
2903                " damage.  Please contact Dell Support for assistance.\n",
2904                bp->dev->name);
2905 }
2906
2907 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2908 {
2909         int port = BP_PORT(bp);
2910         int reg_offset;
2911         u32 val, swap_val, swap_override;
2912
2913         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2915
2916         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2917
2918                 val = REG_RD(bp, reg_offset);
2919                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920                 REG_WR(bp, reg_offset, val);
2921
2922                 BNX2X_ERR("SPIO5 hw attention\n");
2923
2924                 /* Fan failure attention */
2925                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2927                         /* Low power mode is controlled by GPIO 2 */
2928                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2929                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930                         /* The PHY reset is controlled by GPIO 1 */
2931                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933                         break;
2934
2935                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936                         /* The PHY reset is controlled by GPIO 1 */
2937                         /* fake the port number to cancel the swap done in
2938                            set_gpio() */
2939                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941                         port = (swap_val && swap_override) ^ 1;
2942                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944                         break;
2945
2946                 default:
2947                         break;
2948                 }
2949                 bnx2x_fan_failure(bp);
2950         }
2951
2952         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954                 bnx2x_acquire_phy_lock(bp);
2955                 bnx2x_handle_module_detect_int(&bp->link_params);
2956                 bnx2x_release_phy_lock(bp);
2957         }
2958
2959         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2960
2961                 val = REG_RD(bp, reg_offset);
2962                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963                 REG_WR(bp, reg_offset, val);
2964
2965                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2966                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2967                 bnx2x_panic();
2968         }
2969 }
2970
2971 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2972 {
2973         u32 val;
2974
2975         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2976
2977                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979                 /* DORQ discard attention */
2980                 if (val & 0x2)
2981                         BNX2X_ERR("FATAL error from DORQ\n");
2982         }
2983
2984         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2985
2986                 int port = BP_PORT(bp);
2987                 int reg_offset;
2988
2989                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2991
2992                 val = REG_RD(bp, reg_offset);
2993                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994                 REG_WR(bp, reg_offset, val);
2995
2996                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2997                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2998                 bnx2x_panic();
2999         }
3000 }
3001
3002 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3003 {
3004         u32 val;
3005
3006         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3007
3008                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010                 /* CFC error attention */
3011                 if (val & 0x2)
3012                         BNX2X_ERR("FATAL error from CFC\n");
3013         }
3014
3015         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3016
3017                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019                 /* RQ_USDMDP_FIFO_OVERFLOW */
3020                 if (val & 0x18000)
3021                         BNX2X_ERR("FATAL error from PXP\n");
3022         }
3023
3024         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3025
3026                 int port = BP_PORT(bp);
3027                 int reg_offset;
3028
3029                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3031
3032                 val = REG_RD(bp, reg_offset);
3033                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034                 REG_WR(bp, reg_offset, val);
3035
3036                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3037                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3038                 bnx2x_panic();
3039         }
3040 }
3041
3042 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3043 {
3044         u32 val;
3045
3046         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3047
3048                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049                         int func = BP_FUNC(bp);
3050
3051                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052                         bp->mf_config = SHMEM_RD(bp,
3053                                            mf_cfg.func_mf_config[func].config);
3054                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3055                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3056                                 bnx2x_dcc_event(bp,
3057                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3058                         bnx2x__link_status_update(bp);
3059                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3060                                 bnx2x_pmf_update(bp);
3061
3062                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3063
3064                         BNX2X_ERR("MC assert!\n");
3065                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3069                         bnx2x_panic();
3070
3071                 } else if (attn & BNX2X_MCP_ASSERT) {
3072
3073                         BNX2X_ERR("MCP assert!\n");
3074                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3075                         bnx2x_fw_dump(bp);
3076
3077                 } else
3078                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3079         }
3080
3081         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3082                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083                 if (attn & BNX2X_GRC_TIMEOUT) {
3084                         val = CHIP_IS_E1H(bp) ?
3085                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3087                 }
3088                 if (attn & BNX2X_GRC_RSV) {
3089                         val = CHIP_IS_E1H(bp) ?
3090                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3092                 }
3093                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3094         }
3095 }
3096
3097 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3098 {
3099         struct attn_route attn;
3100         struct attn_route group_mask;
3101         int port = BP_PORT(bp);
3102         int index;
3103         u32 reg_addr;
3104         u32 val;
3105         u32 aeu_mask;
3106
3107         /* take the HW lock because the MCP or the other port might
3108            also try to handle this event */
3109         bnx2x_acquire_alr(bp);
3110
3111         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3115         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3117
3118         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119                 if (deasserted & (1 << index)) {
3120                         group_mask = bp->attn_group[index];
3121
3122                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123                            index, group_mask.sig[0], group_mask.sig[1],
3124                            group_mask.sig[2], group_mask.sig[3]);
3125
3126                         bnx2x_attn_int_deasserted3(bp,
3127                                         attn.sig[3] & group_mask.sig[3]);
3128                         bnx2x_attn_int_deasserted1(bp,
3129                                         attn.sig[1] & group_mask.sig[1]);
3130                         bnx2x_attn_int_deasserted2(bp,
3131                                         attn.sig[2] & group_mask.sig[2]);
3132                         bnx2x_attn_int_deasserted0(bp,
3133                                         attn.sig[0] & group_mask.sig[0]);
3134
3135                         if ((attn.sig[0] & group_mask.sig[0] &
3136                                                 HW_PRTY_ASSERT_SET_0) ||
3137                             (attn.sig[1] & group_mask.sig[1] &
3138                                                 HW_PRTY_ASSERT_SET_1) ||
3139                             (attn.sig[2] & group_mask.sig[2] &
3140                                                 HW_PRTY_ASSERT_SET_2))
3141                                 BNX2X_ERR("FATAL HW block parity attention\n");
3142                 }
3143         }
3144
3145         bnx2x_release_alr(bp);
3146
3147         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3148
3149         val = ~deasserted;
3150         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3151            val, reg_addr);
3152         REG_WR(bp, reg_addr, val);
3153
3154         if (~bp->attn_state & deasserted)
3155                 BNX2X_ERR("IGU ERROR\n");
3156
3157         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3159
3160         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161         aeu_mask = REG_RD(bp, reg_addr);
3162
3163         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3164            aeu_mask, deasserted);
3165         aeu_mask |= (deasserted & 0xff);
3166         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3167
3168         REG_WR(bp, reg_addr, aeu_mask);
3169         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3170
3171         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172         bp->attn_state &= ~deasserted;
3173         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3174 }
3175
3176 static void bnx2x_attn_int(struct bnx2x *bp)
3177 {
3178         /* read local copy of bits */
3179         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180                                                                 attn_bits);
3181         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182                                                                 attn_bits_ack);
3183         u32 attn_state = bp->attn_state;
3184
3185         /* look for changed bits */
3186         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3187         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3188
3189         DP(NETIF_MSG_HW,
3190            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3191            attn_bits, attn_ack, asserted, deasserted);
3192
3193         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3194                 BNX2X_ERR("BAD attention state\n");
3195
3196         /* handle bits that were raised */
3197         if (asserted)
3198                 bnx2x_attn_int_asserted(bp, asserted);
3199
3200         if (deasserted)
3201                 bnx2x_attn_int_deasserted(bp, deasserted);
3202 }
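
/*
 * Edge-detection example: with attn_bits = 0x5, attn_ack = 0x1 and
 * attn_state = 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (bit 2 newly
 * raised, neither acked nor in our state) and deasserted =
 * ~0x5 & 0x1 & 0x1 = 0.  A bit on which attn_bits == attn_ack while
 * attn_bits != attn_state means the bookkeeping went stale, which is
 * what the "BAD attention state" check above catches.
 */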
3203
3204 static void bnx2x_sp_task(struct work_struct *work)
3205 {
3206         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3207         u16 status;
3208
3209
3210         /* Return here if interrupt is disabled */
3211         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3212                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3213                 return;
3214         }
3215
3216         status = bnx2x_update_dsb_idx(bp);
3217 /*      if (status == 0)                                     */
3218 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3219
3220         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3221
3222         /* HW attentions */
3223         if (status & 0x1)
3224                 bnx2x_attn_int(bp);
3225
3226         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3227                      IGU_INT_NOP, 1);
3228         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3229                      IGU_INT_NOP, 1);
3230         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3231                      IGU_INT_NOP, 1);
3232         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3233                      IGU_INT_NOP, 1);
3234         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3235                      IGU_INT_ENABLE, 1);
3236
3237 }
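
/*
 * The default status block is acked storm by storm with IGU_INT_NOP, so
 * the updated indices are consumed without touching the mask; only the
 * final TSTORM ack passes IGU_INT_ENABLE, re-enabling the slow-path
 * interrupt that bnx2x_msix_sp_int() disabled below.
 */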
3238
3239 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3240 {
3241         struct net_device *dev = dev_instance;
3242         struct bnx2x *bp = netdev_priv(dev);
3243
3244         /* Return here if interrupt is disabled */
3245         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3246                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3247                 return IRQ_HANDLED;
3248         }
3249
3250         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3251
3252 #ifdef BNX2X_STOP_ON_ERROR
3253         if (unlikely(bp->panic))
3254                 return IRQ_HANDLED;
3255 #endif
3256
3257 #ifdef BCM_CNIC
3258         {
3259                 struct cnic_ops *c_ops;
3260
3261                 rcu_read_lock();
3262                 c_ops = rcu_dereference(bp->cnic_ops);
3263                 if (c_ops)
3264                         c_ops->cnic_handler(bp->cnic_data, NULL);
3265                 rcu_read_unlock();
3266         }
3267 #endif
3268         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3269
3270         return IRQ_HANDLED;
3271 }
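
/*
 * The slow-path ISR itself only masks further default-SB interrupts
 * (IGU_INT_DISABLE above) and, when CNIC is compiled in, gives the
 * offload handler a chance to inspect the event; the real work is
 * deferred to bnx2x_sp_task() on the bnx2x_wq workqueue.
 */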
3272
3273 /* end of slow path */
3274
3275 /* Statistics */
3276
3277 /****************************************************************************
3278 * Macros
3279 ****************************************************************************/
3280
3281 /* sum[hi:lo] += add[hi:lo] */
3282 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283         do { \
3284                 s_lo += a_lo; \
3285                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286         } while (0)
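
/*
 * Worked example (illustrative): with s = {s_hi = 0, s_lo = 0xfffffffe}
 * and a = {a_hi = 0, a_lo = 3}, s_lo wraps to 1; since s_lo < a_lo the
 * carry term adds 1 to s_hi, yielding the correct 64-bit sum
 * 0x00000001_00000001.
 */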
3287
3288 /* difference = minuend - subtrahend */
3289 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290         do { \
3291                 if (m_lo < s_lo) { \
3292                         /* underflow */ \
3293                         d_hi = m_hi - s_hi; \
3294                         if (d_hi > 0) { \
3295                                 /* we can borrow 1 */ \
3296                                 d_hi--; \
3297                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3298                         } else { \
3299                                 /* m_hi <= s_hi */ \
3300                                 d_hi = 0; \
3301                                 d_lo = 0; \
3302                         } \
3303                 } else { \
3304                         /* m_lo >= s_lo */ \
3305                         if (m_hi < s_hi) { \
3306                                 d_hi = 0; \
3307                                 d_lo = 0; \
3308                         } else { \
3309                                 /* m_hi >= s_hi */ \
3310                                 d_hi = m_hi - s_hi; \
3311                                 d_lo = m_lo - s_lo; \
3312                         } \
3313                 } \
3314         } while (0)
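
/*
 * Worked example (illustrative): m = {1, 0x00000000} minus
 * s = {0, 0x00000001}: m_lo < s_lo and d_hi = 1 > 0, so one is borrowed
 * from the high word and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * i.e. d = 0x00000000_ffffffff.  The zero-clamping branches assume the
 * counters are monotonic (minuend >= subtrahend), so a truly negative
 * difference is never produced.
 */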
3315
3316 #define UPDATE_STAT64(s, t) \
3317         do { \
3318                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323                        pstats->mac_stx[1].t##_lo, diff.lo); \
3324         } while (0)
3325
3326 #define UPDATE_STAT64_NIG(s, t) \
3327         do { \
3328                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329                         diff.lo, new->s##_lo, old->s##_lo); \
3330                 ADD_64(estats->t##_hi, diff.hi, \
3331                        estats->t##_lo, diff.lo); \
3332         } while (0)
3333
3334 /* sum[hi:lo] += add */
3335 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336         do { \
3337                 s_lo += a; \
3338                 s_hi += (s_lo < a) ? 1 : 0; \
3339         } while (0)
3340
3341 #define UPDATE_EXTEND_STAT(s) \
3342         do { \
3343                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344                               pstats->mac_stx[1].s##_lo, \
3345                               new->s); \
3346         } while (0)
3347
3348 #define UPDATE_EXTEND_TSTAT(s, t) \
3349         do { \
3350                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351                 old_tclient->s = tclient->s; \
3352                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353         } while (0)
3354
3355 #define UPDATE_EXTEND_USTAT(s, t) \
3356         do { \
3357                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358                 old_uclient->s = uclient->s; \
3359                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360         } while (0)
3361
3362 #define UPDATE_EXTEND_XSTAT(s, t) \
3363         do { \
3364                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365                 old_xclient->s = xclient->s; \
3366                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367         } while (0)
3368
3369 /* minuend -= subtrahend */
3370 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371         do { \
3372                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373         } while (0)
3374
3375 /* minuend[hi:lo] -= subtrahend */
3376 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3377         do { \
3378                 SUB_64(m_hi, 0, m_lo, s); \
3379         } while (0)
3380
3381 #define SUB_EXTEND_USTAT(s, t) \
3382         do { \
3383                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385         } while (0)
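
/*
 * Note: the UPDATE_... and SUB_... helpers above expand against local
 * variables with fixed names ("new", "old", "pstats", "estats",
 * "qstats", "tclient"/"uclient"/"xclient" with their "old_..." shadows,
 * and "diff"), which is why each of the stats-update functions below
 * declares exactly that set of locals before invoking them.
 */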
3386
3387 /*
3388  * General service functions
3389  */
3390
3391 static inline long bnx2x_hilo(u32 *hiref)
3392 {
3393         u32 lo = *(hiref + 1);
3394 #if (BITS_PER_LONG == 64)
3395         u32 hi = *hiref;
3396
3397         return HILO_U64(hi, lo);
3398 #else
3399         return lo;
3400 #endif
3401 }
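
/*
 * On 64-bit builds the {hi, lo} pair is folded into a single value via
 * HILO_U64(); on 32-bit builds a long cannot hold the high word, so
 * only the low 32 bits are returned.
 */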
3402
3403 /*
3404  * Init service functions
3405  */
3406
3407 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3408 {
3409         if (!bp->stats_pending) {
3410                 struct eth_query_ramrod_data ramrod_data = {0};
3411                 int i, rc;
3412
3413                 ramrod_data.drv_counter = bp->stats_counter++;
3414                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3415                 for_each_queue(bp, i)
3416                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3417
3418                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419                                    ((u32 *)&ramrod_data)[1],
3420                                    ((u32 *)&ramrod_data)[0], 0);
3421                 if (rc == 0) {
3422                         /* the stats ramrod has its own slot on the spq */
3423                         bp->spq_left++;
3424                         bp->stats_pending = 1;
3425                 }
3426         }
3427 }
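
/*
 * The ramrod carries a driver-side sequence number (drv_counter) plus a
 * bitmask of client IDs to collect for.  bnx2x_storm_stats_update()
 * later compares the per-storm counters written back by the chip
 * against bp->stats_counter to decide whether the snapshot is current.
 * The SPQ credit is refunded immediately because, as noted above, the
 * statistics ramrod has its own dedicated slot on the slow-path queue.
 */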
3428
3429 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3430 {
3431         struct dmae_command *dmae = &bp->stats_dmae;
3432         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3433
3434         *stats_comp = DMAE_COMP_VAL;
3435         if (CHIP_REV_IS_SLOW(bp))
3436                 return;
3437
3438         /* loader */
3439         if (bp->executer_idx) {
3440                 int loader_idx = PMF_DMAE_C(bp);
3441
3442                 memset(dmae, 0, sizeof(struct dmae_command));
3443
3444                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446                                 DMAE_CMD_DST_RESET |
3447 #ifdef __BIG_ENDIAN
3448                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449 #else
3450                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3451 #endif
3452                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453                                                DMAE_CMD_PORT_0) |
3454                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458                                      sizeof(struct dmae_command) *
3459                                      (loader_idx + 1)) >> 2;
3460                 dmae->dst_addr_hi = 0;
3461                 dmae->len = sizeof(struct dmae_command) >> 2;
3462                 if (CHIP_IS_E1(bp))
3463                         dmae->len--;
3464                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465                 dmae->comp_addr_hi = 0;
3466                 dmae->comp_val = 1;
3467
3468                 *stats_comp = 0;
3469                 bnx2x_post_dmae(bp, dmae, loader_idx);
3470
3471         } else if (bp->func_stx) {
3472                 *stats_comp = 0;
3473                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3474         }
3475 }
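
/*
 * Loader pattern: when commands have been queued (bp->executer_idx is
 * non-zero), the command built here copies the first queued command
 * into the DMAE command memory of channel loader_idx + 1 and, on
 * completion, writes 1 to that channel's GO register -- in effect the
 * whole chain then executes back-to-back without further CPU
 * involvement.  With nothing queued, the pre-built function-statistics
 * command (bp->stats_dmae) is posted directly when bp->func_stx is set.
 */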
3476
3477 static int bnx2x_stats_comp(struct bnx2x *bp)
3478 {
3479         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480         int cnt = 10;
3481
3482         might_sleep();
3483         while (*stats_comp != DMAE_COMP_VAL) {
3484                 if (!cnt) {
3485                         BNX2X_ERR("timeout waiting for stats to finish\n");
3486                         break;
3487                 }
3488                 cnt--;
3489                 msleep(1);
3490         }
3491         return 1;
3492 }
3493
3494 /*
3495  * Statistics service functions
3496  */
3497
3498 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3499 {
3500         struct dmae_command *dmae;
3501         u32 opcode;
3502         int loader_idx = PMF_DMAE_C(bp);
3503         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3504
3505         /* sanity */
3506         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507                 BNX2X_ERR("BUG!\n");
3508                 return;
3509         }
3510
3511         bp->executer_idx = 0;
3512
3513         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3514                   DMAE_CMD_C_ENABLE |
3515                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3516 #ifdef __BIG_ENDIAN
3517                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3518 #else
3519                   DMAE_CMD_ENDIANITY_DW_SWAP |
3520 #endif
3521                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3523
3524         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526         dmae->src_addr_lo = bp->port.port_stx >> 2;
3527         dmae->src_addr_hi = 0;
3528         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530         dmae->len = DMAE_LEN32_RD_MAX;
3531         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532         dmae->comp_addr_hi = 0;
3533         dmae->comp_val = 1;
3534
3535         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538         dmae->src_addr_hi = 0;
3539         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540                                    DMAE_LEN32_RD_MAX * 4);
3541         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542                                    DMAE_LEN32_RD_MAX * 4);
3543         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546         dmae->comp_val = DMAE_COMP_VAL;
3547
3548         *stats_comp = 0;
3549         bnx2x_hw_stats_post(bp);
3550         bnx2x_stats_comp(bp);
3551 }
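
/*
 * host_port_stats is larger than one DMAE read allows
 * (DMAE_LEN32_RD_MAX dwords), so the copy from the shmem port-stats
 * area is split into two commands.  Only the second completes to host
 * memory with DMAE_COMP_VAL, which bnx2x_stats_comp() then polls for.
 */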
3552
3553 static void bnx2x_port_stats_init(struct bnx2x *bp)
3554 {
3555         struct dmae_command *dmae;
3556         int port = BP_PORT(bp);
3557         int vn = BP_E1HVN(bp);
3558         u32 opcode;
3559         int loader_idx = PMF_DMAE_C(bp);
3560         u32 mac_addr;
3561         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563         /* sanity */
3564         if (!bp->link_vars.link_up || !bp->port.pmf) {
3565                 BNX2X_ERR("BUG!\n");
3566                 return;
3567         }
3568
3569         bp->executer_idx = 0;
3570
3571         /* MCP */
3572         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575 #ifdef __BIG_ENDIAN
3576                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577 #else
3578                   DMAE_CMD_ENDIANITY_DW_SWAP |
3579 #endif
3580                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581                   (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583         if (bp->port.port_stx) {
3584
3585                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586                 dmae->opcode = opcode;
3587                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590                 dmae->dst_addr_hi = 0;
3591                 dmae->len = sizeof(struct host_port_stats) >> 2;
3592                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593                 dmae->comp_addr_hi = 0;
3594                 dmae->comp_val = 1;
3595         }
3596
3597         if (bp->func_stx) {
3598
3599                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600                 dmae->opcode = opcode;
3601                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603                 dmae->dst_addr_lo = bp->func_stx >> 2;
3604                 dmae->dst_addr_hi = 0;
3605                 dmae->len = sizeof(struct host_func_stats) >> 2;
3606                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607                 dmae->comp_addr_hi = 0;
3608                 dmae->comp_val = 1;
3609         }
3610
3611         /* MAC */
3612         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615 #ifdef __BIG_ENDIAN
3616                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617 #else
3618                   DMAE_CMD_ENDIANITY_DW_SWAP |
3619 #endif
3620                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621                   (vn << DMAE_CMD_E1HVN_SHIFT));
3622
3623         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3624
3625                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626                                    NIG_REG_INGRESS_BMAC0_MEM);
3627
3628                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3630                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631                 dmae->opcode = opcode;
3632                 dmae->src_addr_lo = (mac_addr +
3633                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634                 dmae->src_addr_hi = 0;
3635                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640                 dmae->comp_addr_hi = 0;
3641                 dmae->comp_val = 1;
3642
3643                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646                 dmae->opcode = opcode;
3647                 dmae->src_addr_lo = (mac_addr +
3648                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649                 dmae->src_addr_hi = 0;
3650                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3651                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3653                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3654                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657                 dmae->comp_addr_hi = 0;
3658                 dmae->comp_val = 1;
3659
3660         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3661
3662                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3665                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666                 dmae->opcode = opcode;
3667                 dmae->src_addr_lo = (mac_addr +
3668                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669                 dmae->src_addr_hi = 0;
3670                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674                 dmae->comp_addr_hi = 0;
3675                 dmae->comp_val = 1;
3676
3677                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679                 dmae->opcode = opcode;
3680                 dmae->src_addr_lo = (mac_addr +
3681                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682                 dmae->src_addr_hi = 0;
3683                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3684                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3686                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3687                 dmae->len = 1;
3688                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689                 dmae->comp_addr_hi = 0;
3690                 dmae->comp_val = 1;
3691
3692                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3693                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694                 dmae->opcode = opcode;
3695                 dmae->src_addr_lo = (mac_addr +
3696                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697                 dmae->src_addr_hi = 0;
3698                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3699                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3701                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3702                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704                 dmae->comp_addr_hi = 0;
3705                 dmae->comp_val = 1;
3706         }
3707
3708         /* NIG */
3709         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710         dmae->opcode = opcode;
3711         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713         dmae->src_addr_hi = 0;
3714         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718         dmae->comp_addr_hi = 0;
3719         dmae->comp_val = 1;
3720
3721         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722         dmae->opcode = opcode;
3723         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725         dmae->src_addr_hi = 0;
3726         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730         dmae->len = (2*sizeof(u32)) >> 2;
3731         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732         dmae->comp_addr_hi = 0;
3733         dmae->comp_val = 1;
3734
3735         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739 #ifdef __BIG_ENDIAN
3740                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741 #else
3742                         DMAE_CMD_ENDIANITY_DW_SWAP |
3743 #endif
3744                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745                         (vn << DMAE_CMD_E1HVN_SHIFT));
3746         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3748         dmae->src_addr_hi = 0;
3749         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753         dmae->len = (2*sizeof(u32)) >> 2;
3754         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756         dmae->comp_val = DMAE_COMP_VAL;
3757
3758         *stats_comp = 0;
3759 }
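
/*
 * The DMAE program assembled above runs, in order: host port/function
 * statistics are written out to the MCP scratchpad (when port_stx and
 * func_stx are set), then the active MAC's counters (BMAC or EMAC) and
 * the NIG counters are read into host memory.  Every command but the
 * last completes to a GRC GO register so the loader can chain them;
 * only the final NIG read completes to stats_comp with DMAE_COMP_VAL.
 */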
3760
3761 static void bnx2x_func_stats_init(struct bnx2x *bp)
3762 {
3763         struct dmae_command *dmae = &bp->stats_dmae;
3764         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3765
3766         /* sanity */
3767         if (!bp->func_stx) {
3768                 BNX2X_ERR("BUG!\n");
3769                 return;
3770         }
3771
3772         bp->executer_idx = 0;
3773         memset(dmae, 0, sizeof(struct dmae_command));
3774
3775         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778 #ifdef __BIG_ENDIAN
3779                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780 #else
3781                         DMAE_CMD_ENDIANITY_DW_SWAP |
3782 #endif
3783                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787         dmae->dst_addr_lo = bp->func_stx >> 2;
3788         dmae->dst_addr_hi = 0;
3789         dmae->len = sizeof(struct host_func_stats) >> 2;
3790         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792         dmae->comp_val = DMAE_COMP_VAL;
3793
3794         *stats_comp = 0;
3795 }
3796
3797 static void bnx2x_stats_start(struct bnx2x *bp)
3798 {
3799         if (bp->port.pmf)
3800                 bnx2x_port_stats_init(bp);
3801
3802         else if (bp->func_stx)
3803                 bnx2x_func_stats_init(bp);
3804
3805         bnx2x_hw_stats_post(bp);
3806         bnx2x_storm_stats_post(bp);
3807 }
3808
3809 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3810 {
3811         bnx2x_stats_comp(bp);
3812         bnx2x_stats_pmf_update(bp);
3813         bnx2x_stats_start(bp);
3814 }
3815
3816 static void bnx2x_stats_restart(struct bnx2x *bp)
3817 {
3818         bnx2x_stats_comp(bp);
3819         bnx2x_stats_start(bp);
3820 }
3821
3822 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3823 {
3824         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3826         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3827         struct {
3828                 u32 lo;
3829                 u32 hi;
3830         } diff;
3831
3832         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3838         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3839         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3840         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3841         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844         UPDATE_STAT64(tx_stat_gt127,
3845                                 tx_stat_etherstatspkts65octetsto127octets);
3846         UPDATE_STAT64(tx_stat_gt255,
3847                                 tx_stat_etherstatspkts128octetsto255octets);
3848         UPDATE_STAT64(tx_stat_gt511,
3849                                 tx_stat_etherstatspkts256octetsto511octets);
3850         UPDATE_STAT64(tx_stat_gt1023,
3851                                 tx_stat_etherstatspkts512octetsto1023octets);
3852         UPDATE_STAT64(tx_stat_gt1518,
3853                                 tx_stat_etherstatspkts1024octetsto1522octets);
3854         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858         UPDATE_STAT64(tx_stat_gterr,
3859                                 tx_stat_dot3statsinternalmactransmiterrors);
3860         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3861
3862         estats->pause_frames_received_hi =
3863                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864         estats->pause_frames_received_lo =
3865                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3866
3867         estats->pause_frames_sent_hi =
3868                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869         estats->pause_frames_sent_lo =
3870                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3871 }
3872
3873 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3874 {
3875         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3877         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3878
3879         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3910
3911         estats->pause_frames_received_hi =
3912                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913         estats->pause_frames_received_lo =
3914                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915         ADD_64(estats->pause_frames_received_hi,
3916                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917                estats->pause_frames_received_lo,
3918                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3919
3920         estats->pause_frames_sent_hi =
3921                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922         estats->pause_frames_sent_lo =
3923                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924         ADD_64(estats->pause_frames_sent_hi,
3925                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926                estats->pause_frames_sent_lo,
3927                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3928 }
3929
3930 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3931 {
3932         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933         struct nig_stats *old = &(bp->port.old_nig_stats);
3934         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3936         struct {
3937                 u32 lo;
3938                 u32 hi;
3939         } diff;
3940         u32 nig_timer_max;
3941
3942         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943                 bnx2x_bmac_stats_update(bp);
3944
3945         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946                 bnx2x_emac_stats_update(bp);
3947
3948         else { /* unreached */
3949                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3950                 return -1;
3951         }
3952
3953         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954                       new->brb_discard - old->brb_discard);
3955         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956                       new->brb_truncate - old->brb_truncate);
3957
3958         UPDATE_STAT64_NIG(egress_mac_pkt0,
3959                                         etherstatspkts1024octetsto1522octets);
3960         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3961
3962         memcpy(old, new, sizeof(struct nig_stats));
3963
3964         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965                sizeof(struct mac_stx));
3966         estats->brb_drop_hi = pstats->brb_drop_hi;
3967         estats->brb_drop_lo = pstats->brb_drop_lo;
3968
3969         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3970
3971         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972         if (nig_timer_max != estats->nig_timer_max) {
3973                 estats->nig_timer_max = nig_timer_max;
3974                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3975         }
3976
3977         return 0;
3978 }
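
/*
 * host_port_stats_start/host_port_stats_end act as a snapshot sequence
 * pair: end is bumped and mirrored into start once the block is
 * consistent, presumably so a reader can detect a torn update.  The
 * NIG timer maximum is mirrored from shmem and logged whenever it
 * changes.
 */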
3979
3980 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3981 {
3982         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3983         struct tstorm_per_port_stats *tport =
3984                                         &stats->tstorm_common.port_statistics;
3985         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3987         int i;
3988
3989         memcpy(&(fstats->total_bytes_received_hi),
3990                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3991                sizeof(struct host_func_stats) - 2*sizeof(u32));
3992         estats->error_bytes_received_hi = 0;
3993         estats->error_bytes_received_lo = 0;
3994         estats->etherstatsoverrsizepkts_hi = 0;
3995         estats->etherstatsoverrsizepkts_lo = 0;
3996         estats->no_buff_discard_hi = 0;
3997         estats->no_buff_discard_lo = 0;
3998
3999         for_each_queue(bp, i) {
4000                 struct bnx2x_fastpath *fp = &bp->fp[i];
4001                 int cl_id = fp->cl_id;
4002                 struct tstorm_per_client_stats *tclient =
4003                                 &stats->tstorm_common.client_statistics[cl_id];
4004                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005                 struct ustorm_per_client_stats *uclient =
4006                                 &stats->ustorm_common.client_statistics[cl_id];
4007                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008                 struct xstorm_per_client_stats *xclient =
4009                                 &stats->xstorm_common.client_statistics[cl_id];
4010                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012                 u32 diff;
4013
4014                 /* are storm stats valid? */
4015                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016                                                         bp->stats_counter) {
4017                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018                            "  xstorm counter (%d) != stats_counter (%d)\n",
4019                            i, xclient->stats_counter, bp->stats_counter);
4020                         return -1;
4021                 }
4022                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023                                                         bp->stats_counter) {
4024                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025                            "  tstorm counter (%d) != stats_counter (%d)\n",
4026                            i, tclient->stats_counter, bp->stats_counter);
4027                         return -2;
4028                 }
4029                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030                                                         bp->stats_counter) {
4031                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032                            "  ustorm counter (%d) != stats_counter (%d)\n",
4033                            i, uclient->stats_counter, bp->stats_counter);
4034                         return -4;
4035                 }
4036
4037                 qstats->total_bytes_received_hi =
4038                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4039                 qstats->total_bytes_received_lo =
4040                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4041
4042                 ADD_64(qstats->total_bytes_received_hi,
4043                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044                        qstats->total_bytes_received_lo,
4045                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4046
4047                 ADD_64(qstats->total_bytes_received_hi,
4048                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049                        qstats->total_bytes_received_lo,
4050                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4051
4052                 qstats->valid_bytes_received_hi =
4053                                         qstats->total_bytes_received_hi;
4054                 qstats->valid_bytes_received_lo =
4055                                         qstats->total_bytes_received_lo;
4056
4057                 qstats->error_bytes_received_hi =
4058                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4059                 qstats->error_bytes_received_lo =
4060                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4061
4062                 ADD_64(qstats->total_bytes_received_hi,
4063                        qstats->error_bytes_received_hi,
4064                        qstats->total_bytes_received_lo,
4065                        qstats->error_bytes_received_lo);
4066
4067                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068                                         total_unicast_packets_received);
4069                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070                                         total_multicast_packets_received);
4071                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072                                         total_broadcast_packets_received);
4073                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074                                         etherstatsoverrsizepkts);
4075                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4076
4077                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078                                         total_unicast_packets_received);
4079                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080                                         total_multicast_packets_received);
4081                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082                                         total_broadcast_packets_received);
4083                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4086
4087                 qstats->total_bytes_transmitted_hi =
4088                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4089                 qstats->total_bytes_transmitted_lo =
4090                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4091
4092                 ADD_64(qstats->total_bytes_transmitted_hi,
4093                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094                        qstats->total_bytes_transmitted_lo,
4095                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4096
4097                 ADD_64(qstats->total_bytes_transmitted_hi,
4098                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099                        qstats->total_bytes_transmitted_lo,
4100                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4101
4102                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103                                         total_unicast_packets_transmitted);
4104                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105                                         total_multicast_packets_transmitted);
4106                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107                                         total_broadcast_packets_transmitted);
4108
4109                 old_tclient->checksum_discard = tclient->checksum_discard;
4110                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4111
4112                 ADD_64(fstats->total_bytes_received_hi,
4113                        qstats->total_bytes_received_hi,
4114                        fstats->total_bytes_received_lo,
4115                        qstats->total_bytes_received_lo);
4116                 ADD_64(fstats->total_bytes_transmitted_hi,
4117                        qstats->total_bytes_transmitted_hi,
4118                        fstats->total_bytes_transmitted_lo,
4119                        qstats->total_bytes_transmitted_lo);
4120                 ADD_64(fstats->total_unicast_packets_received_hi,
4121                        qstats->total_unicast_packets_received_hi,
4122                        fstats->total_unicast_packets_received_lo,
4123                        qstats->total_unicast_packets_received_lo);
4124                 ADD_64(fstats->total_multicast_packets_received_hi,
4125                        qstats->total_multicast_packets_received_hi,
4126                        fstats->total_multicast_packets_received_lo,
4127                        qstats->total_multicast_packets_received_lo);
4128                 ADD_64(fstats->total_broadcast_packets_received_hi,
4129                        qstats->total_broadcast_packets_received_hi,
4130                        fstats->total_broadcast_packets_received_lo,
4131                        qstats->total_broadcast_packets_received_lo);
4132                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133                        qstats->total_unicast_packets_transmitted_hi,
4134                        fstats->total_unicast_packets_transmitted_lo,
4135                        qstats->total_unicast_packets_transmitted_lo);
4136                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137                        qstats->total_multicast_packets_transmitted_hi,
4138                        fstats->total_multicast_packets_transmitted_lo,
4139                        qstats->total_multicast_packets_transmitted_lo);
4140                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141                        qstats->total_broadcast_packets_transmitted_hi,
4142                        fstats->total_broadcast_packets_transmitted_lo,
4143                        qstats->total_broadcast_packets_transmitted_lo);
4144                 ADD_64(fstats->valid_bytes_received_hi,
4145                        qstats->valid_bytes_received_hi,
4146                        fstats->valid_bytes_received_lo,
4147                        qstats->valid_bytes_received_lo);
4148
4149                 ADD_64(estats->error_bytes_received_hi,
4150                        qstats->error_bytes_received_hi,
4151                        estats->error_bytes_received_lo,
4152                        qstats->error_bytes_received_lo);
4153                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154                        qstats->etherstatsoverrsizepkts_hi,
4155                        estats->etherstatsoverrsizepkts_lo,
4156                        qstats->etherstatsoverrsizepkts_lo);
4157                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4159         }
4160
4161         ADD_64(fstats->total_bytes_received_hi,
4162                estats->rx_stat_ifhcinbadoctets_hi,
4163                fstats->total_bytes_received_lo,
4164                estats->rx_stat_ifhcinbadoctets_lo);
4165
4166         memcpy(estats, &(fstats->total_bytes_received_hi),
4167                sizeof(struct host_func_stats) - 2*sizeof(u32));
4168
4169         ADD_64(estats->etherstatsoverrsizepkts_hi,
4170                estats->rx_stat_dot3statsframestoolong_hi,
4171                estats->etherstatsoverrsizepkts_lo,
4172                estats->rx_stat_dot3statsframestoolong_lo);
4173         ADD_64(estats->error_bytes_received_hi,
4174                estats->rx_stat_ifhcinbadoctets_hi,
4175                estats->error_bytes_received_lo,
4176                estats->rx_stat_ifhcinbadoctets_lo);
4177
4178         if (bp->port.pmf) {
4179                 estats->mac_filter_discard =
4180                                 le32_to_cpu(tport->mac_filter_discard);
4181                 estats->xxoverflow_discard =
4182                                 le32_to_cpu(tport->xxoverflow_discard);
4183                 estats->brb_truncate_discard =
4184                                 le32_to_cpu(tport->brb_truncate_discard);
4185                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4186         }
4187
4188         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4189
4190         bp->stats_pending = 0;
4191
4192         return 0;
4193 }
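
The ADD_64() invocations above accumulate 64-bit counters that the firmware exports as separate hi/lo 32-bit words. The helper is a macro in bnx2x.h; its carry logic is essentially the following (a standalone sketch, not the driver's verbatim definition):

#include <stdint.h>

/* s = s + a, with both 64-bit values split into hi/lo u32 halves */
#define ADD_64(s_hi, a_hi, s_lo, a_lo)				\
	do {							\
		(s_lo) += (a_lo);				\
		/* unsigned wraparound of s_lo signals a carry */ \
		(s_hi) += (a_hi) + (((s_lo) < (a_lo)) ? 1 : 0);	\
	} while (0)

For example, with hi == 0 and lo == 0xffffffff, ADD_64(hi, 0, lo, 1) leaves hi == 1 and lo == 0.
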
4194
4195 static void bnx2x_net_stats_update(struct bnx2x *bp)
4196 {
4197         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4198         struct net_device_stats *nstats = &bp->dev->stats;
4199         int i;
4200
4201         nstats->rx_packets =
4202                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4205
4206         nstats->tx_packets =
4207                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4210
4211         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4212
4213         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4214
4215         nstats->rx_dropped = estats->mac_discard;
4216         for_each_queue(bp, i)
4217                 nstats->rx_dropped +=
4218                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4219
4220         nstats->tx_dropped = 0;
4221
4222         nstats->multicast =
4223                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4224
4225         nstats->collisions =
4226                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4227
4228         nstats->rx_length_errors =
4229                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232                                  bnx2x_hilo(&estats->brb_truncate_hi);
4233         nstats->rx_crc_errors =
4234                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235         nstats->rx_frame_errors =
4236                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4238         nstats->rx_missed_errors = estats->xxoverflow_discard;
4239
4240         nstats->rx_errors = nstats->rx_length_errors +
4241                             nstats->rx_over_errors +
4242                             nstats->rx_crc_errors +
4243                             nstats->rx_frame_errors +
4244                             nstats->rx_fifo_errors +
4245                             nstats->rx_missed_errors;
4246
4247         nstats->tx_aborted_errors =
4248                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250         nstats->tx_carrier_errors =
4251                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4252         nstats->tx_fifo_errors = 0;
4253         nstats->tx_heartbeat_errors = 0;
4254         nstats->tx_window_errors = 0;
4255
4256         nstats->tx_errors = nstats->tx_aborted_errors +
4257                             nstats->tx_carrier_errors +
4258             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4259 }
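
bnx2x_hilo() is the inverse helper: it folds a hi/lo pair back into a single scalar for the unsigned-long net_device_stats fields, which can only hold the full 64-bit value on 64-bit kernels. A sketch of the idea (the driver's actual inline lives in bnx2x.h):

#include <stdint.h>

/* hiref points at the hi word; the lo word immediately follows it */
static unsigned long bnx2x_hilo_sketch(const uint32_t *hiref)
{
	uint32_t lo = hiref[1];

#if defined(__LP64__)
	return ((uint64_t)hiref[0] << 32) | lo;	/* full 64-bit counter */
#else
	return lo;	/* 32-bit kernels keep only the low word */
#endif
}
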
4260
4261 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262 {
4263         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264         int i;
4265
4266         estats->driver_xoff = 0;
4267         estats->rx_err_discard_pkt = 0;
4268         estats->rx_skb_alloc_failed = 0;
4269         estats->hw_csum_err = 0;
4270         for_each_queue(bp, i) {
4271                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273                 estats->driver_xoff += qstats->driver_xoff;
4274                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276                 estats->hw_csum_err += qstats->hw_csum_err;
4277         }
4278 }
4279
4280 static void bnx2x_stats_update(struct bnx2x *bp)
4281 {
4282         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4283
4284         if (*stats_comp != DMAE_COMP_VAL)
4285                 return;
4286
4287         if (bp->port.pmf)
4288                 bnx2x_hw_stats_update(bp);
4289
4290         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4292                 bnx2x_panic();
4293                 return;
4294         }
4295
4296         bnx2x_net_stats_update(bp);
4297         bnx2x_drv_stats_update(bp);
4298
4299         if (bp->msglevel & NETIF_MSG_TIMER) {
4300                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302                 struct tstorm_per_client_stats *old_tclient =
4303                                                         &bp->fp->old_tclient;
4304                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4305                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4306                 struct net_device_stats *nstats = &bp->dev->stats;
4307                 int i;
4308
4309                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4311                                   "  tx pkt (%lx)\n",
4312                        bnx2x_tx_avail(fp0_tx),
4313                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4314                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4315                                   "  rx pkt (%lx)\n",
4316                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317                              fp0_rx->rx_comp_cons),
4318                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4319                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4320                                   "brb truncate %u\n",
4321                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322                        qstats->driver_xoff,
4323                        estats->brb_drop_lo, estats->brb_truncate_lo);
4324                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4325                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4326                         "mac_discard %u  mac_filter_discard %u  "
4327                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4328                         "ttl0_discard %u\n",
4329                        le32_to_cpu(old_tclient->checksum_discard),
4330                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4332                        estats->mac_discard, estats->mac_filter_discard,
4333                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4334                        le32_to_cpu(old_tclient->ttl0_discard));
4335
4336                 for_each_queue(bp, i) {
4337                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338                                bnx2x_fp(bp, i, tx_pkt),
4339                                bnx2x_fp(bp, i, rx_pkt),
4340                                bnx2x_fp(bp, i, rx_calls));
4341                 }
4342         }
4343
4344         bnx2x_hw_stats_post(bp);
4345         bnx2x_storm_stats_post(bp);
4346 }
4347
4348 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4349 {
4350         struct dmae_command *dmae;
4351         u32 opcode;
4352         int loader_idx = PMF_DMAE_C(bp);
4353         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4354
4355         bp->executer_idx = 0;
4356
4357         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358                   DMAE_CMD_C_ENABLE |
4359                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4360 #ifdef __BIG_ENDIAN
4361                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4362 #else
4363                   DMAE_CMD_ENDIANITY_DW_SWAP |
4364 #endif
4365                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4367
4368         if (bp->port.port_stx) {
4369
4370                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371                 if (bp->func_stx)
4372                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373                 else
4374                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4378                 dmae->dst_addr_hi = 0;
4379                 dmae->len = sizeof(struct host_port_stats) >> 2;
4380                 if (bp->func_stx) {
4381                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382                         dmae->comp_addr_hi = 0;
4383                         dmae->comp_val = 1;
4384                 } else {
4385                         dmae->comp_addr_lo =
4386                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387                         dmae->comp_addr_hi =
4388                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389                         dmae->comp_val = DMAE_COMP_VAL;
4390
4391                         *stats_comp = 0;
4392                 }
4393         }
4394
4395         if (bp->func_stx) {
4396
4397                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401                 dmae->dst_addr_lo = bp->func_stx >> 2;
4402                 dmae->dst_addr_hi = 0;
4403                 dmae->len = sizeof(struct host_func_stats) >> 2;
4404                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406                 dmae->comp_val = DMAE_COMP_VAL;
4407
4408                 *stats_comp = 0;
4409         }
4410 }
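
The U64_LO()/U64_HI() pairs used throughout these DMAE setups split a 64-bit host DMA address into the two 32-bit words a dmae_command carries; they are plain shift/mask macros along these lines (a sketch of the bnx2x.h definitions):

#include <stdint.h>

#define U64_LO(x)	((uint32_t)((uint64_t)(x) & 0xffffffff))
#define U64_HI(x)	((uint32_t)((uint64_t)(x) >> 32))

Note also the completion convention above: when a function-stats command will follow, the port-stats command completes into a DMAE "go" register (DMAE_CMD_C_DST_GRC) so that finishing it kicks off the next command in the chain; the final command completes into the stats_comp word in host memory instead.
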
4411
4412 static void bnx2x_stats_stop(struct bnx2x *bp)
4413 {
4414         int update = 0;
4415
4416         bnx2x_stats_comp(bp);
4417
4418         if (bp->port.pmf)
4419                 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421         update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423         if (update) {
4424                 bnx2x_net_stats_update(bp);
4425
4426                 if (bp->port.pmf)
4427                         bnx2x_port_stats_stop(bp);
4428
4429                 bnx2x_hw_stats_post(bp);
4430                 bnx2x_stats_comp(bp);
4431         }
4432 }
4433
4434 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4435 {
4436 }
4437
4438 static const struct {
4439         void (*action)(struct bnx2x *bp);
4440         enum bnx2x_stats_state next_state;
4441 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442 /* state        event   */
4443 {
4444 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4446 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4448 },
4449 {
4450 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4451 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4452 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4453 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4454 }
4455 };
4456
4457 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458 {
4459         enum bnx2x_stats_state state = bp->stats_state;
4460
4461         bnx2x_stats_stm[state][event].action(bp);
4462         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
4464         /* make sure the state change is visible before the next event */
4465         smp_wmb();
4466
4467         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469                    state, event, bp->stats_state);
4470 }
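
Statistics handling is thus a small table-driven state machine: bnx2x_stats_stm is indexed by the current state and the incoming event, and yields an action callback plus the next state. The same pattern in miniature (purely illustrative, not driver code):

enum toy_state { ST_OFF, ST_ON, ST_MAX };
enum toy_event { EV_START, EV_STOP, EV_MAX };

struct toy;			/* opaque context, like struct bnx2x */
static void toy_noop(struct toy *t) { (void)t; }

static const struct {
	void (*action)(struct toy *);
	enum toy_state next;
} toy_stm[ST_MAX][EV_MAX] = {
	[ST_OFF] = { [EV_START] = { toy_noop, ST_ON },
		     [EV_STOP]  = { toy_noop, ST_OFF } },
	[ST_ON]  = { [EV_START] = { toy_noop, ST_ON },
		     [EV_STOP]  = { toy_noop, ST_OFF } },
};

static enum toy_state toy_handle(struct toy *t, enum toy_state s,
				 enum toy_event e)
{
	toy_stm[s][e].action(t);
	return toy_stm[s][e].next;
}

Keeping the transitions in one const table makes every state/event combination explicit, which is why the DISABLED row above pairs most events with bnx2x_stats_do_nothing rather than leaving holes.
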
4471
4472 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4473 {
4474         struct dmae_command *dmae;
4475         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4476
4477         /* sanity */
4478         if (!bp->port.pmf || !bp->port.port_stx) {
4479                 BNX2X_ERR("BUG!\n");
4480                 return;
4481         }
4482
4483         bp->executer_idx = 0;
4484
4485         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489 #ifdef __BIG_ENDIAN
4490                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491 #else
4492                         DMAE_CMD_ENDIANITY_DW_SWAP |
4493 #endif
4494                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499         dmae->dst_addr_hi = 0;
4500         dmae->len = sizeof(struct host_port_stats) >> 2;
4501         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503         dmae->comp_val = DMAE_COMP_VAL;
4504
4505         *stats_comp = 0;
4506         bnx2x_hw_stats_post(bp);
4507         bnx2x_stats_comp(bp);
4508 }
4509
4510 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511 {
4512         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513         int port = BP_PORT(bp);
4514         int func;
4515         u32 func_stx;
4516
4517         /* sanity */
4518         if (!bp->port.pmf || !bp->func_stx) {
4519                 BNX2X_ERR("BUG!\n");
4520                 return;
4521         }
4522
4523         /* save our func_stx */
4524         func_stx = bp->func_stx;
4525
4526         for (vn = VN_0; vn < vn_max; vn++) {
4527                 func = 2*vn + port;
4528
4529                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530                 bnx2x_func_stats_init(bp);
4531                 bnx2x_hw_stats_post(bp);
4532                 bnx2x_stats_comp(bp);
4533         }
4534
4535         /* restore our func_stx */
4536         bp->func_stx = func_stx;
4537 }
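
The func = 2*vn + port mapping encodes how PCI functions are striped across the two ports of an E1H device: even functions sit on port 0, odd ones on port 1, one function per virtual NIC. A throwaway illustration of which functions the loop above visits (values assumed, with E1HVN_MAX taken as 4):

#include <stdio.h>

int main(void)
{
	int port = 1, vn;

	for (vn = 0; vn < 4; vn++)	/* prints funcs 1, 3, 5, 7 */
		printf("vn%d -> func %d\n", vn, 2 * vn + port);
	return 0;
}
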
4538
4539 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4540 {
4541         struct dmae_command *dmae = &bp->stats_dmae;
4542         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4543
4544         /* sanity */
4545         if (!bp->func_stx) {
4546                 BNX2X_ERR("BUG!\n");
4547                 return;
4548         }
4549
4550         bp->executer_idx = 0;
4551         memset(dmae, 0, sizeof(struct dmae_command));
4552
4553         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556 #ifdef __BIG_ENDIAN
4557                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558 #else
4559                         DMAE_CMD_ENDIANITY_DW_SWAP |
4560 #endif
4561                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563         dmae->src_addr_lo = bp->func_stx >> 2;
4564         dmae->src_addr_hi = 0;
4565         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567         dmae->len = sizeof(struct host_func_stats) >> 2;
4568         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570         dmae->comp_val = DMAE_COMP_VAL;
4571
4572         *stats_comp = 0;
4573         bnx2x_hw_stats_post(bp);
4574         bnx2x_stats_comp(bp);
4575 }
4576
4577 static void bnx2x_stats_init(struct bnx2x *bp)
4578 {
4579         int port = BP_PORT(bp);
4580         int func = BP_FUNC(bp);
4581         int i;
4582
4583         bp->stats_pending = 0;
4584         bp->executer_idx = 0;
4585         bp->stats_counter = 0;
4586
4587         /* port and func stats for management */
4588         if (!BP_NOMCP(bp)) {
4589                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4591
4592         } else {
4593                 bp->port.port_stx = 0;
4594                 bp->func_stx = 0;
4595         }
4596         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4597            bp->port.port_stx, bp->func_stx);
4598
4599         /* port stats */
4600         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601         bp->port.old_nig_stats.brb_discard =
4602                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603         bp->port.old_nig_stats.brb_truncate =
4604                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4609
4610         /* function stats */
4611         for_each_queue(bp, i) {
4612                 struct bnx2x_fastpath *fp = &bp->fp[i];
4613
4614                 memset(&fp->old_tclient, 0,
4615                        sizeof(struct tstorm_per_client_stats));
4616                 memset(&fp->old_uclient, 0,
4617                        sizeof(struct ustorm_per_client_stats));
4618                 memset(&fp->old_xclient, 0,
4619                        sizeof(struct xstorm_per_client_stats));
4620                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4621         }
4622
4623         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4625
4626         bp->stats_state = STATS_STATE_DISABLED;
4627
4628         if (bp->port.pmf) {
4629                 if (bp->port.port_stx)
4630                         bnx2x_port_stats_base_init(bp);
4631
4632                 if (bp->func_stx)
4633                         bnx2x_func_stats_base_init(bp);
4634
4635         } else if (bp->func_stx)
4636                 bnx2x_func_stats_base_update(bp);
4637 }
4638
4639 static void bnx2x_timer(unsigned long data)
4640 {
4641         struct bnx2x *bp = (struct bnx2x *) data;
4642
4643         if (!netif_running(bp->dev))
4644                 return;
4645
4646         if (atomic_read(&bp->intr_sem) != 0)
4647                 goto timer_restart;
4648
4649         if (poll) {
4650                 struct bnx2x_fastpath *fp = &bp->fp[0];
4651
4652                 bnx2x_tx_int(fp);
4653                 /* in poll mode, drain up to 1000 Rx completions per tick */
4654                 bnx2x_rx_int(fp, 1000);
4655         }
4656
4657         if (!BP_NOMCP(bp)) {
4658                 int func = BP_FUNC(bp);
4659                 u32 drv_pulse;
4660                 u32 mcp_pulse;
4661
4662                 ++bp->fw_drv_pulse_wr_seq;
4663                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664                 /* TBD - add SYSTEM_TIME */
4665                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4666                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4667
4668                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4669                              MCP_PULSE_SEQ_MASK);
4670                 /* The delta between driver pulse and mcp response
4671                  * should be 1 (before mcp response) or 0 (after mcp response)
4672                  */
4673                 if ((drv_pulse != mcp_pulse) &&
4674                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675                         /* someone lost a heartbeat... */
4676                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677                                   drv_pulse, mcp_pulse);
4678                 }
4679         }
4680
4681         if (bp->state == BNX2X_STATE_OPEN)
4682                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4683
4684 timer_restart:
4685         mod_timer(&bp->timer, jiffies + bp->current_interval);
4686 }
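
The pulse exchange above is a sequence-number heartbeat: the driver bumps its counter on every timer tick and the management firmware (MCP) echoes it back. Because the echo may legitimately lag by one tick, both a zero and a one delta (modulo the sequence mask) count as healthy. A self-contained model of the check, assuming MCP_PULSE_SEQ_MASK is a contiguous low-bit mask as in the driver headers:

#include <stdbool.h>
#include <stdint.h>

static bool pulse_in_sync(uint32_t drv, uint32_t mcp, uint32_t mask)
{
	drv &= mask;
	mcp &= mask;
	/* equal: MCP already answered; one ahead: answer still pending */
	return drv == mcp || drv == ((mcp + 1) & mask);
}

Anything else means a heartbeat was lost on one side, which is exactly what the BNX2X_ERR() above reports.
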
4687
4688 /* end of Statistics */
4689
4690 /* nic init */
4691
4692 /*
4693  * nic init service functions
4694  */
4695
4696 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4697 {
4698         int port = BP_PORT(bp);
4699
4700         /* "CSTORM" */
4701         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4707 }
4708
4709 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710                           dma_addr_t mapping, int sb_id)
4711 {
4712         int port = BP_PORT(bp);
4713         int func = BP_FUNC(bp);
4714         int index;
4715         u64 section;
4716
4717         /* USTORM */
4718         section = ((u64)mapping) + offsetof(struct host_status_block,
4719                                             u_status_block);
4720         sb->u_status_block.status_block_id = sb_id;
4721
4722         REG_WR(bp, BAR_CSTRORM_INTMEM +
4723                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724         REG_WR(bp, BAR_CSTRORM_INTMEM +
4725                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4726                U64_HI(section));
4727         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4729
4730         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4731                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4733
4734         /* CSTORM */
4735         section = ((u64)mapping) + offsetof(struct host_status_block,
4736                                             c_status_block);
4737         sb->c_status_block.status_block_id = sb_id;
4738
4739         REG_WR(bp, BAR_CSTRORM_INTMEM +
4740                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4741         REG_WR(bp, BAR_CSTRORM_INTMEM +
4742                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4743                U64_HI(section));
4744         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4745                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4746
4747         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4750
4751         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4752 }
4753
4754 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4755 {
4756         int func = BP_FUNC(bp);
4757
4758         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4759                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760                         sizeof(struct tstorm_def_status_block)/4);
4761         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763                         sizeof(struct cstorm_def_status_block_u)/4);
4764         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766                         sizeof(struct cstorm_def_status_block_c)/4);
4767         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4768                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769                         sizeof(struct xstorm_def_status_block)/4);
4770 }
4771
4772 static void bnx2x_init_def_sb(struct bnx2x *bp,
4773                               struct host_def_status_block *def_sb,
4774                               dma_addr_t mapping, int sb_id)
4775 {
4776         int port = BP_PORT(bp);
4777         int func = BP_FUNC(bp);
4778         int index, val, reg_offset;
4779         u64 section;
4780
4781         /* ATTN */
4782         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783                                             atten_status_block);
4784         def_sb->atten_status_block.status_block_id = sb_id;
4785
4786         bp->attn_state = 0;
4787
4788         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4790
4791         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4792                 bp->attn_group[index].sig[0] = REG_RD(bp,
4793                                                      reg_offset + 0x10*index);
4794                 bp->attn_group[index].sig[1] = REG_RD(bp,
4795                                                reg_offset + 0x4 + 0x10*index);
4796                 bp->attn_group[index].sig[2] = REG_RD(bp,
4797                                                reg_offset + 0x8 + 0x10*index);
4798                 bp->attn_group[index].sig[3] = REG_RD(bp,
4799                                                reg_offset + 0xc + 0x10*index);
4800         }
4801
4802         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803                              HC_REG_ATTN_MSG0_ADDR_L);
4804
4805         REG_WR(bp, reg_offset, U64_LO(section));
4806         REG_WR(bp, reg_offset + 4, U64_HI(section));
4807
4808         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4809
4810         val = REG_RD(bp, reg_offset);
4811         val |= sb_id;
4812         REG_WR(bp, reg_offset, val);
4813
4814         /* USTORM */
4815         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816                                             u_def_status_block);
4817         def_sb->u_def_status_block.status_block_id = sb_id;
4818
4819         REG_WR(bp, BAR_CSTRORM_INTMEM +
4820                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821         REG_WR(bp, BAR_CSTRORM_INTMEM +
4822                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4823                U64_HI(section));
4824         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4826
4827         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4828                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4830
4831         /* CSTORM */
4832         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833                                             c_def_status_block);
4834         def_sb->c_def_status_block.status_block_id = sb_id;
4835
4836         REG_WR(bp, BAR_CSTRORM_INTMEM +
4837                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4838         REG_WR(bp, BAR_CSTRORM_INTMEM +
4839                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4840                U64_HI(section));
4841         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4842                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4843
4844         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4847
4848         /* TSTORM */
4849         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850                                             t_def_status_block);
4851         def_sb->t_def_status_block.status_block_id = sb_id;
4852
4853         REG_WR(bp, BAR_TSTRORM_INTMEM +
4854                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855         REG_WR(bp, BAR_TSTRORM_INTMEM +
4856                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4857                U64_HI(section));
4858         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4859                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4860
4861         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4863                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4864
4865         /* XSTORM */
4866         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867                                             x_def_status_block);
4868         def_sb->x_def_status_block.status_block_id = sb_id;
4869
4870         REG_WR(bp, BAR_XSTRORM_INTMEM +
4871                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872         REG_WR(bp, BAR_XSTRORM_INTMEM +
4873                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874                U64_HI(section));
4875         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4876                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4877
4878         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4880                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4881
4882         bp->stats_pending = 0;
4883         bp->set_mac_pending = 0;
4884
4885         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4886 }
4887
4888 static void bnx2x_update_coalesce(struct bnx2x *bp)
4889 {
4890         int port = BP_PORT(bp);
4891         int i;
4892
4893         for_each_queue(bp, i) {
4894                 int sb_id = bp->fp[i].sb_id;
4895
4896                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4897                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899                                                       U_SB_ETH_RX_CQ_INDEX),
4900                         bp->rx_ticks/(4 * BNX2X_BTR));
4901                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903                                                        U_SB_ETH_RX_CQ_INDEX),
4904                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4905
4906                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909                                                       C_SB_ETH_TX_CQ_INDEX),
4910                         bp->tx_ticks/(4 * BNX2X_BTR));
4911                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913                                                        C_SB_ETH_TX_CQ_INDEX),
4914                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4915         }
4916 }
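
The timeout fields are programmed in units of 4*BNX2X_BTR microseconds, so the divisions above truncate the user-visible rx_ticks/tx_ticks values to hardware resolution; a quotient of zero means the requested interval is below that resolution, and the matching HC_DISABLE word is then written as 1 to switch coalescing off for that index. Illustratively (assuming, purely for the arithmetic, BNX2X_BTR == 2), rx_ticks == 25 programs 25/8 == 3 and leaves the index enabled, while rx_ticks == 6 programs 0 and disables it.
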
4917
4918 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919                                        struct bnx2x_fastpath *fp, int last)
4920 {
4921         int i;
4922
4923         for (i = 0; i < last; i++) {
4924                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925                 struct sk_buff *skb = rx_buf->skb;
4926
4927                 if (skb == NULL) {
4928                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929                         continue;
4930                 }
4931
4932                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933                         pci_unmap_single(bp->pdev,
4934                                          pci_unmap_addr(rx_buf, mapping),
4935                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4936
4937                 dev_kfree_skb(skb);
4938                 rx_buf->skb = NULL;
4939         }
4940 }
4941
4942 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943 {
4944         int func = BP_FUNC(bp);
4945         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4947         u16 ring_prod, cqe_ring_prod;
4948         int i, j;
4949
4950         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4951         DP(NETIF_MSG_IFUP,
4952            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4953
4954         if (bp->flags & TPA_ENABLE_FLAG) {
4955
4956                 for_each_queue(bp, j) {
4957                         struct bnx2x_fastpath *fp = &bp->fp[j];
4958
4959                         for (i = 0; i < max_agg_queues; i++) {
4960                                 fp->tpa_pool[i].skb =
4961                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962                                 if (!fp->tpa_pool[i].skb) {
4963                                         BNX2X_ERR("Failed to allocate TPA "
4964                                                   "skb pool for queue[%d] - "
4965                                                   "disabling TPA on this "
4966                                                   "queue!\n", j);
4967                                         bnx2x_free_tpa_pool(bp, fp, i);
4968                                         fp->disable_tpa = 1;
4969                                         break;
4970                                 }
4971                                 /* no DMA mapping has been set up yet */
4972                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4973                                                    mapping, 0);
4974                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975                         }
4976                 }
4977         }
4978
4979         for_each_queue(bp, j) {
4980                 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982                 fp->rx_bd_cons = 0;
4983                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4984                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4985
4986                 /* "next page" elements initialization */
4987                 /* SGE ring */
4988                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989                         struct eth_rx_sge *sge;
4990
4991                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992                         sge->addr_hi =
4993                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995                         sge->addr_lo =
4996                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998                 }
4999
5000                 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002                 /* RX BD ring */
5003                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004                         struct eth_rx_bd *rx_bd;
5005
5006                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007                         rx_bd->addr_hi =
5008                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5009                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5010                         rx_bd->addr_lo =
5011                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5012                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5013                 }
5014
5015                 /* CQ ring */
5016                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017                         struct eth_rx_cqe_next_page *nextpg;
5018
5019                         nextpg = (struct eth_rx_cqe_next_page *)
5020                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021                         nextpg->addr_hi =
5022                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5023                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5024                         nextpg->addr_lo =
5025                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5026                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5027                 }
5028
5029                 /* Allocate SGEs and initialize the ring elements */
5030                 for (i = 0, ring_prod = 0;
5031                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5032
5033                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034                                 BNX2X_ERR("was only able to allocate "
5035                                           "%d rx sges\n", i);
5036                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037                                 /* Cleanup already allocated elements */
5038                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5039                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5040                                 fp->disable_tpa = 1;
5041                                 ring_prod = 0;
5042                                 break;
5043                         }
5044                         ring_prod = NEXT_SGE_IDX(ring_prod);
5045                 }
5046                 fp->rx_sge_prod = ring_prod;
5047
5048                 /* Allocate BDs and initialize BD ring */
5049                 fp->rx_comp_cons = 0;
5050                 cqe_ring_prod = ring_prod = 0;
5051                 for (i = 0; i < bp->rx_ring_size; i++) {
5052                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053                                 BNX2X_ERR("was only able to allocate "
5054                                           "%d rx skbs on queue[%d]\n", i, j);
5055                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5056                                 break;
5057                         }
5058                         ring_prod = NEXT_RX_IDX(ring_prod);
5059                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5060                         WARN_ON(ring_prod <= i);
5061                 }
5062
5063                 fp->rx_bd_prod = ring_prod;
5064                 /* must not have more available CQEs than BDs */
5065                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066                                        cqe_ring_prod);
5067                 fp->rx_pkt = fp->rx_calls = 0;
5068
5069                 /* Warning!
5070                  * This will generate an interrupt (to the TSTORM),
5071                  * so it must only be done after the chip is initialized.
5072                  */
5073                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074                                      fp->rx_sge_prod);
5075                 if (j != 0)
5076                         continue;
5077
5078                 REG_WR(bp, BAR_USTRORM_INTMEM +
5079                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5080                        U64_LO(fp->rx_comp_mapping));
5081                 REG_WR(bp, BAR_USTRORM_INTMEM +
5082                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5083                        U64_HI(fp->rx_comp_mapping));
5084         }
5085 }
5086
5087 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088 {
5089         int i, j;
5090
5091         for_each_queue(bp, j) {
5092                 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5095                         struct eth_tx_next_bd *tx_next_bd =
5096                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5097
5098                         tx_next_bd->addr_hi =
5099                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5100                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5101                         tx_next_bd->addr_lo =
5102                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5103                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5104                 }
5105
5106                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107                 fp->tx_db.data.zero_fill1 = 0;
5108                 fp->tx_db.data.prod = 0;
5109
5110                 fp->tx_pkt_prod = 0;
5111                 fp->tx_pkt_cons = 0;
5112                 fp->tx_bd_prod = 0;
5113                 fp->tx_bd_cons = 0;
5114                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115                 fp->tx_pkt = 0;
5116         }
5117 }
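
Both the Rx rings above and these Tx rings are built from pages chained into a circle: the final slot(s) of each page hold the DMA address of the next page instead of a real descriptor, which is why the init loops write entry CNT*i - 1 (or CNT*i - 2 for the smaller Rx BDs) and why the producer/consumer index macros must skip those link slots. A toy model of the skipping, assuming one link slot at the end of each page:

/* hypothetical geometry: 4 pages of 64 slots, last slot is the link */
#define TOY_PAGES	4
#define TOY_CNT		64

static unsigned short toy_next_idx(unsigned short idx)
{
	idx++;
	if ((idx % TOY_CNT) == TOY_CNT - 1)	/* landed on a link slot */
		idx++;
	return idx % (TOY_PAGES * TOY_CNT);
}
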
5118
5119 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5120 {
5121         int func = BP_FUNC(bp);
5122
5123         spin_lock_init(&bp->spq_lock);
5124
5125         bp->spq_left = MAX_SPQ_PENDING;
5126         bp->spq_prod_idx = 0;
5127         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128         bp->spq_prod_bd = bp->spq;
5129         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5130
5131         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5132                U64_LO(bp->spq_mapping));
5133         REG_WR(bp,
5134                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5135                U64_HI(bp->spq_mapping));
5136
5137         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5138                bp->spq_prod_idx);
5139 }
5140
5141 static void bnx2x_init_context(struct bnx2x *bp)
5142 {
5143         int i;
5144
5145         /* Rx */
5146         for_each_queue(bp, i) {
5147                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148                 struct bnx2x_fastpath *fp = &bp->fp[i];
5149                 u8 cl_id = fp->cl_id;
5150
5151                 context->ustorm_st_context.common.sb_index_numbers =
5152                                                 BNX2X_RX_SB_INDEX_NUM;
5153                 context->ustorm_st_context.common.clientId = cl_id;
5154                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5155                 context->ustorm_st_context.common.flags =
5156                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158                 context->ustorm_st_context.common.statistics_counter_id =
5159                                                 cl_id;
5160                 context->ustorm_st_context.common.mc_alignment_log_size =
5161                                                 BNX2X_RX_ALIGN_SHIFT;
5162                 context->ustorm_st_context.common.bd_buff_size =
5163                                                 bp->rx_buf_size;
5164                 context->ustorm_st_context.common.bd_page_base_hi =
5165                                                 U64_HI(fp->rx_desc_mapping);
5166                 context->ustorm_st_context.common.bd_page_base_lo =
5167                                                 U64_LO(fp->rx_desc_mapping);
5168                 if (!fp->disable_tpa) {
5169                         context->ustorm_st_context.common.flags |=
5170                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5171                         context->ustorm_st_context.common.sge_buff_size =
5172                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5173                                          (u32)0xffff);
5174                         context->ustorm_st_context.common.sge_page_base_hi =
5175                                                 U64_HI(fp->rx_sge_mapping);
5176                         context->ustorm_st_context.common.sge_page_base_lo =
5177                                                 U64_LO(fp->rx_sge_mapping);
5178
5179                         context->ustorm_st_context.common.max_sges_for_packet =
5180                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181                         context->ustorm_st_context.common.max_sges_for_packet =
5182                                 ((context->ustorm_st_context.common.
5183                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5184                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5185                 }
5186
5187                 context->ustorm_ag_context.cdu_usage =
5188                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189                                                CDU_REGION_NUMBER_UCM_AG,
5190                                                ETH_CONNECTION_TYPE);
5191
5192                 context->xstorm_ag_context.cdu_reserved =
5193                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194                                                CDU_REGION_NUMBER_XCM_AG,
5195                                                ETH_CONNECTION_TYPE);
5196         }
5197
5198         /* Tx */
5199         for_each_queue(bp, i) {
5200                 struct bnx2x_fastpath *fp = &bp->fp[i];
5201                 struct eth_context *context =
5202                         bnx2x_sp(bp, context[i].eth);
5203
5204                 context->cstorm_st_context.sb_index_number =
5205                                                 C_SB_ETH_TX_CQ_INDEX;
5206                 context->cstorm_st_context.status_block_id = fp->sb_id;
5207
5208                 context->xstorm_st_context.tx_bd_page_base_hi =
5209                                                 U64_HI(fp->tx_desc_mapping);
5210                 context->xstorm_st_context.tx_bd_page_base_lo =
5211                                                 U64_LO(fp->tx_desc_mapping);
5212                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5213                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5214         }
5215 }
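
The max_sges_for_packet value set in the Rx context above is a two-step round-up division: the MTU is first converted into a worst-case count of SGE pages, and that count is then rounded up to whole SGE entries of PAGES_PER_SGE pages each. Worked through with illustrative numbers (MTU 9000, 4 KiB SGE pages, PAGES_PER_SGE == 2):

/* SGE_PAGE_ALIGN(9000) >> 12	-> 3 pages for one frame	    */
/* (3 + 2 - 1) & ~(2 - 1)	-> 4, rounded up to a multiple of 2 */
/* 4 >> PAGES_PER_SGE_SHIFT (1)	-> 2 SGE entries per packet	    */
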
5216
5217 static void bnx2x_init_ind_table(struct bnx2x *bp)
5218 {
5219         int func = BP_FUNC(bp);
5220         int i;
5221
5222         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5223                 return;
5224
5225         DP(NETIF_MSG_IFUP,
5226            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5227         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5228                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5229                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5230                         bp->fp->cl_id + (i % bp->num_queues));
5231 }
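
The indirection table is filled round-robin so that RSS hash buckets spread evenly across the active queues: entry i steers hash bucket i to client (base + i mod num_queues). A minimal sketch of the fill, with an assumed table size and ids:

static void toy_fill_ind_table(unsigned char *table, int size,
			       int base_cl_id, int num_queues)
{
	int i;

	for (i = 0; i < size; i++)	/* e.g. 0,1,2,3,0,1,2,3,... */
		table[i] = base_cl_id + (i % num_queues);
}
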
5232
5233 static void bnx2x_set_client_config(struct bnx2x *bp)
5234 {
5235         struct tstorm_eth_client_config tstorm_client = {0};
5236         int port = BP_PORT(bp);
5237         int i;
5238
5239         tstorm_client.mtu = bp->dev->mtu;
5240         tstorm_client.config_flags =
5241                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5243 #ifdef BCM_VLAN
5244         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5245                 tstorm_client.config_flags |=
5246                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5247                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5248         }
5249 #endif
5250
5251         for_each_queue(bp, i) {
5252                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5253
5254                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5255                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5256                        ((u32 *)&tstorm_client)[0]);
5257                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5259                        ((u32 *)&tstorm_client)[1]);
5260         }
5261
5262         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5264 }
5265
5266 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5267 {
5268         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5269         int mode = bp->rx_mode;
5270         int mask = bp->rx_mode_cl_mask;
5271         int func = BP_FUNC(bp);
5272         int port = BP_PORT(bp);
5273         int i;
5274         /* everything except unicast destined to management should also reach the host */
5275         u32 llh_mask =
5276                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5280
5281         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5282
5283         switch (mode) {
5284         case BNX2X_RX_MODE_NONE: /* no Rx */
5285                 tstorm_mac_filter.ucast_drop_all = mask;
5286                 tstorm_mac_filter.mcast_drop_all = mask;
5287                 tstorm_mac_filter.bcast_drop_all = mask;
5288                 break;
5289
5290         case BNX2X_RX_MODE_NORMAL:
5291                 tstorm_mac_filter.bcast_accept_all = mask;
5292                 break;
5293
5294         case BNX2X_RX_MODE_ALLMULTI:
5295                 tstorm_mac_filter.mcast_accept_all = mask;
5296                 tstorm_mac_filter.bcast_accept_all = mask;
5297                 break;
5298
5299         case BNX2X_RX_MODE_PROMISC:
5300                 tstorm_mac_filter.ucast_accept_all = mask;
5301                 tstorm_mac_filter.mcast_accept_all = mask;
5302                 tstorm_mac_filter.bcast_accept_all = mask;
5303                 /* pass management unicast packets as well */
5304                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5305                 break;
5306
5307         default:
5308                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5309                 break;
5310         }
5311
5312         REG_WR(bp,
5313                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5314                llh_mask);
5315
5316         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5319                        ((u32 *)&tstorm_mac_filter)[i]);
5320
5321 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5322                    ((u32 *)&tstorm_mac_filter)[i]); */
5323         }
5324
5325         if (mode != BNX2X_RX_MODE_NONE)
5326                 bnx2x_set_client_config(bp);
5327 }
5328
5329 static void bnx2x_init_internal_common(struct bnx2x *bp)
5330 {
5331         int i;
5332
5333         /* Zero this manually as its initialization is
5334            currently missing in the initTool */
5335         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336                 REG_WR(bp, BAR_USTRORM_INTMEM +
5337                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338 }
5339
5340 static void bnx2x_init_internal_port(struct bnx2x *bp)
5341 {
5342         int port = BP_PORT(bp);
5343
5344         REG_WR(bp,
5345                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346         REG_WR(bp,
5347                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5348         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350 }
5351
5352 static void bnx2x_init_internal_func(struct bnx2x *bp)
5353 {
5354         struct tstorm_eth_function_common_config tstorm_config = {0};
5355         struct stats_indication_flags stats_flags = {0};
5356         int port = BP_PORT(bp);
5357         int func = BP_FUNC(bp);
5358         int i, j;
5359         u32 offset;
5360         u16 max_agg_size;
5361
5362         if (is_multi(bp)) {
5363                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5364                 tstorm_config.rss_result_mask = MULTI_MASK;
5365         }
5366
5367         /* Enable TPA if needed */
5368         if (bp->flags & TPA_ENABLE_FLAG)
5369                 tstorm_config.config_flags |=
5370                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5371
5372         if (IS_E1HMF(bp))
5373                 tstorm_config.config_flags |=
5374                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5375
5376         tstorm_config.leading_client_id = BP_L_ID(bp);
5377
5378         REG_WR(bp, BAR_TSTRORM_INTMEM +
5379                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5380                (*(u32 *)&tstorm_config));
5381
5382         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5384         bnx2x_set_storm_rx_mode(bp);
5385
5386         for_each_queue(bp, i) {
5387                 u8 cl_id = bp->fp[i].cl_id;
5388
5389                 /* reset xstorm per client statistics */
5390                 offset = BAR_XSTRORM_INTMEM +
5391                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5392                 for (j = 0;
5393                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394                         REG_WR(bp, offset + j*4, 0);
5395
5396                 /* reset tstorm per client statistics */
5397                 offset = BAR_TSTRORM_INTMEM +
5398                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5399                 for (j = 0;
5400                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401                         REG_WR(bp, offset + j*4, 0);
5402
5403                 /* reset ustorm per client statistics */
5404                 offset = BAR_USTRORM_INTMEM +
5405                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5406                 for (j = 0;
5407                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408                         REG_WR(bp, offset + j*4, 0);
5409         }
5410
5411         /* Init statistics related context */
5412         stats_flags.collect_eth = 1;
5413
5414         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5415                ((u32 *)&stats_flags)[0]);
5416         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5417                ((u32 *)&stats_flags)[1]);
5418
5419         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5420                ((u32 *)&stats_flags)[0]);
5421         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5422                ((u32 *)&stats_flags)[1]);
5423
5424         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425                ((u32 *)&stats_flags)[0]);
5426         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427                ((u32 *)&stats_flags)[1]);
5428
5429         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5430                ((u32 *)&stats_flags)[0]);
5431         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432                ((u32 *)&stats_flags)[1]);
5433
5434         REG_WR(bp, BAR_XSTRORM_INTMEM +
5435                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437         REG_WR(bp, BAR_XSTRORM_INTMEM +
5438                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5440
5441         REG_WR(bp, BAR_TSTRORM_INTMEM +
5442                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444         REG_WR(bp, BAR_TSTRORM_INTMEM +
5445                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5447
5448         REG_WR(bp, BAR_USTRORM_INTMEM +
5449                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451         REG_WR(bp, BAR_USTRORM_INTMEM +
5452                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5454
5455         if (CHIP_IS_E1H(bp)) {
5456                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5457                         IS_E1HMF(bp));
5458                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5459                         IS_E1HMF(bp));
5460                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5461                         IS_E1HMF(bp));
5462                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5463                         IS_E1HMF(bp));
5464
5465                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5466                          bp->e1hov);
5467         }
5468
5469         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5470         max_agg_size =
5471                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5473                     (u32)0xffff);
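             /* The 0xffff clamp exists because max_agg_size is programmed
              * into a 16-bit USTORM register (see the REG_WR16 below). */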
5474         for_each_queue(bp, i) {
5475                 struct bnx2x_fastpath *fp = &bp->fp[i];
5476
5477                 REG_WR(bp, BAR_USTRORM_INTMEM +
5478                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5479                        U64_LO(fp->rx_comp_mapping));
5480                 REG_WR(bp, BAR_USTRORM_INTMEM +
5481                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5482                        U64_HI(fp->rx_comp_mapping));
5483
5484                 /* Next page */
5485                 REG_WR(bp, BAR_USTRORM_INTMEM +
5486                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5491
5492                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5493                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5494                          max_agg_size);
5495         }
5496
5497         /* dropless flow control */
5498         if (CHIP_IS_E1H(bp)) {
5499                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5500
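                     /* The bd/cqe/sge pairs are low/high watermarks, in ring
                      * entries, that the FW uses to assert and release the
                      * per-ring pause; the SGE watermarks stay zero (unused)
                      * unless TPA is enabled on the ring. */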
5501                 rx_pause.bd_thr_low = 250;
5502                 rx_pause.cqe_thr_low = 250;
5503                 rx_pause.cos = 1;
5504                 rx_pause.sge_thr_low = 0;
5505                 rx_pause.bd_thr_high = 350;
5506                 rx_pause.cqe_thr_high = 350;
5507                 rx_pause.sge_thr_high = 0;
5508
5509                 for_each_queue(bp, i) {
5510                         struct bnx2x_fastpath *fp = &bp->fp[i];
5511
5512                         if (!fp->disable_tpa) {
5513                                 rx_pause.sge_thr_low = 150;
5514                                 rx_pause.sge_thr_high = 250;
5515                         }
5516
5518                         offset = BAR_USTRORM_INTMEM +
5519                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5520                                                                    fp->cl_id);
5521                         for (j = 0;
5522                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5523                              j++)
5524                                 REG_WR(bp, offset + j*4,
5525                                        ((u32 *)&rx_pause)[j]);
5526                 }
5527         }
5528
5529         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5530
5531         /* Init rate shaping and fairness contexts */
5532         if (IS_E1HMF(bp)) {
5533                 int vn;
5534
5535                 /* During init there is no active link.
5536                    Until link is up, set the link rate to 10Gbps */
5537                 bp->link_vars.line_speed = SPEED_10000;
5538                 bnx2x_init_port_minmax(bp);
5539
5540                 if (!BP_NOMCP(bp))
5541                         bp->mf_config =
5542                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5543                 bnx2x_calc_vn_weight_sum(bp);
5544
5545                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5547
5548                 /* Enable rate shaping and fairness */
5549                 bp->cmng.flags.cmng_enables |=
5550                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5551
5552         } else {
5553                 /* rate shaping and fairness are disabled */
5554                 DP(NETIF_MSG_IFUP,
5555                    "single function mode  minmax will be disabled\n");
5556         }
5557
5559         /* Store it to internal memory */
5560         if (bp->port.pmf)
5561                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5563                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564                                ((u32 *)(&bp->cmng))[i]);
5565 }
5566
5567 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568 {
5569         switch (load_code) {
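             /* Deliberate fall-through: a COMMON load also runs the port and
              * function stages, and a PORT load also runs the function stage. */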
5570         case FW_MSG_CODE_DRV_LOAD_COMMON:
5571                 bnx2x_init_internal_common(bp);
5572                 /* no break */
5573
5574         case FW_MSG_CODE_DRV_LOAD_PORT:
5575                 bnx2x_init_internal_port(bp);
5576                 /* no break */
5577
5578         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579                 bnx2x_init_internal_func(bp);
5580                 break;
5581
5582         default:
5583                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584                 break;
5585         }
5586 }
5587
5588 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5589 {
5590         int i;
5591
5592         for_each_queue(bp, i) {
5593                 struct bnx2x_fastpath *fp = &bp->fp[i];
5594
5595                 fp->bp = bp;
5596                 fp->state = BNX2X_FP_STATE_CLOSED;
5597                 fp->index = i;
5598                 fp->cl_id = BP_L_ID(bp) + i;
5599 #ifdef BCM_CNIC
5600                 fp->sb_id = fp->cl_id + 1;
5601 #else
5602                 fp->sb_id = fp->cl_id;
5603 #endif
5604                 DP(NETIF_MSG_IFUP,
5605                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5606                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5607                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5608                               fp->sb_id);
5609                 bnx2x_update_fpsb_idx(fp);
5610         }
5611
5612         /* ensure status block indices were read */
5613         rmb();
5614
5616         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5617                           DEF_SB_ID);
5618         bnx2x_update_dsb_idx(bp);
5619         bnx2x_update_coalesce(bp);
5620         bnx2x_init_rx_rings(bp);
5621         bnx2x_init_tx_ring(bp);
5622         bnx2x_init_sp_ring(bp);
5623         bnx2x_init_context(bp);
5624         bnx2x_init_internal(bp, load_code);
5625         bnx2x_init_ind_table(bp);
5626         bnx2x_stats_init(bp);
5627
5628         /* At this point, we are ready for interrupts */
5629         atomic_set(&bp->intr_sem, 0);
5630
5631         /* flush all before enabling interrupts */
5632         mb();
5633         mmiowb();
5634
5635         bnx2x_int_enable(bp);
5636
5637         /* Check for SPIO5 */
5638         bnx2x_attn_int_deasserted0(bp,
5639                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5641 }
5642
5643 /* end of nic init */
5644
5645 /*
5646  * gzip service functions
5647  */
5648
5649 static int bnx2x_gunzip_init(struct bnx2x *bp)
5650 {
5651         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652                                               &bp->gunzip_mapping);
5653         if (bp->gunzip_buf == NULL)
5654                 goto gunzip_nomem1;
5655
5656         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657         if (bp->strm == NULL)
5658                 goto gunzip_nomem2;
5659
5660         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661                                       GFP_KERNEL);
5662         if (bp->strm->workspace == NULL)
5663                 goto gunzip_nomem3;
5664
5665         return 0;
5666
5667 gunzip_nomem3:
5668         kfree(bp->strm);
5669         bp->strm = NULL;
5670
5671 gunzip_nomem2:
5672         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673                             bp->gunzip_mapping);
5674         bp->gunzip_buf = NULL;
5675
5676 gunzip_nomem1:
5677         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5678                " decompression\n", bp->dev->name);
5679         return -ENOMEM;
5680 }
5681
5682 static void bnx2x_gunzip_end(struct bnx2x *bp)
5683 {
5684         kfree(bp->strm->workspace);
5685
5686         kfree(bp->strm);
5687         bp->strm = NULL;
5688
5689         if (bp->gunzip_buf) {
5690                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691                                     bp->gunzip_mapping);
5692                 bp->gunzip_buf = NULL;
5693         }
5694 }
5695
5696 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5697 {
5698         int n, rc;
5699
5700         /* check gzip header */
5701         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702                 BNX2X_ERR("Bad gzip header\n");
5703                 return -EINVAL;
5704         }
5705
5706         n = 10;
5707
5708 #define FNAME                           0x8
5709
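             /* Per RFC 1952 the fixed gzip header is 10 bytes; if FNAME (bit 3
              * of the FLG byte) is set, a zero-terminated original file name
              * follows and is skipped here (FEXTRA is assumed absent). */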
5710         if (zbuf[3] & FNAME)
5711                 while ((n < len) && (zbuf[n++] != 0));
5712
5713         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5714         bp->strm->avail_in = len - n;
5715         bp->strm->next_out = bp->gunzip_buf;
5716         bp->strm->avail_out = FW_BUF_SIZE;
5717
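             /* A negative windowBits value makes zlib inflate a raw deflate
              * stream, which is why the gzip wrapper was skipped manually. */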
5718         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719         if (rc != Z_OK)
5720                 return rc;
5721
5722         rc = zlib_inflate(bp->strm, Z_FINISH);
5723         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725                        bp->dev->name, bp->strm->msg);
5726
5727         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728         if (bp->gunzip_outlen & 0x3)
5729                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730                                     " gunzip_outlen (%d) not aligned\n",
5731                        bp->dev->name, bp->gunzip_outlen);
5732         bp->gunzip_outlen >>= 2;
5733
5734         zlib_inflateEnd(bp->strm);
5735
5736         if (rc == Z_STREAM_END)
5737                 return 0;
5738
5739         return rc;
5740 }
5741
5742 /* nic load/unload */
5743
5744 /*
5745  * General service functions
5746  */
5747
5748 /* send a NIG loopback debug packet */
5749 static void bnx2x_lb_pckt(struct bnx2x *bp)
5750 {
5751         u32 wb_write[3];
5752
5753         /* Ethernet source and destination addresses */
5754         wb_write[0] = 0x55555555;
5755         wb_write[1] = 0x55555555;
5756         wb_write[2] = 0x20;             /* SOP */
5757         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5758
5759         /* NON-IP protocol */
5760         wb_write[0] = 0x09000000;
5761         wb_write[1] = 0x55555555;
5762         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5763         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5764 }
5765
5766 /* Some of the internal memories
5767  * are not directly readable from the driver.
5768  * To test them we send debug packets.
5769  */
5770 static int bnx2x_int_mem_test(struct bnx2x *bp)
5771 {
5772         int factor;
5773         int count, i;
5774         u32 val = 0;
5775
5776         if (CHIP_REV_IS_FPGA(bp))
5777                 factor = 120;
5778         else if (CHIP_REV_IS_EMUL(bp))
5779                 factor = 200;
5780         else
5781                 factor = 1;
5782
5783         DP(NETIF_MSG_HW, "start part1\n");
5784
5785         /* Disable inputs of parser neighbor blocks */
5786         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5789         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5790
5791         /*  Write 0 to parser credits for CFC search request */
5792         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5793
5794         /* send Ethernet packet */
5795         bnx2x_lb_pckt(bp);
5796
5797         /* TODO: should the NIG statistic be reset here? */
5798         /* Wait until NIG register shows 1 packet of size 0x10 */
5799         count = 1000 * factor;
5800         while (count) {
5801
5802                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803                 val = *bnx2x_sp(bp, wb_data[0]);
5804                 if (val == 0x10)
5805                         break;
5806
5807                 msleep(10);
5808                 count--;
5809         }
5810         if (val != 0x10) {
5811                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5812                 return -1;
5813         }
5814
5815         /* Wait until PRS register shows 1 packet */
5816         count = 1000 * factor;
5817         while (count) {
5818                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5819                 if (val == 1)
5820                         break;
5821
5822                 msleep(10);
5823                 count--;
5824         }
5825         if (val != 0x1) {
5826                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827                 return -2;
5828         }
5829
5830         /* Reset and init BRB, PRS */
5831         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5832         msleep(50);
5833         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5834         msleep(50);
5835         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5837
5838         DP(NETIF_MSG_HW, "part2\n");
5839
5840         /* Disable inputs of parser neighbor blocks */
5841         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5844         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5845
5846         /* Write 0 to parser credits for CFC search request */
5847         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5848
5849         /* send 10 Ethernet packets */
5850         for (i = 0; i < 10; i++)
5851                 bnx2x_lb_pckt(bp);
5852
5853         /* Wait until NIG register shows 10 + 1
5854            packets totalling 11*0x10 = 0xb0 bytes */
5855         count = 1000 * factor;
5856         while (count) {
5857
5858                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859                 val = *bnx2x_sp(bp, wb_data[0]);
5860                 if (val == 0xb0)
5861                         break;
5862
5863                 msleep(10);
5864                 count--;
5865         }
5866         if (val != 0xb0) {
5867                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5868                 return -3;
5869         }
5870
5871         /* Wait until PRS register shows 2 packets */
5872         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5873         if (val != 2)
5874                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5875
5876         /* Write 1 to parser credits for CFC search request */
5877         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5878
5879         /* Wait until PRS register shows 3 packets */
5880         msleep(10 * factor);
5882         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5883         if (val != 3)
5884                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5885
5886         /* clear NIG EOP FIFO */
5887         for (i = 0; i < 11; i++)
5888                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5890         if (val != 1) {
5891                 BNX2X_ERR("clear of NIG failed\n");
5892                 return -4;
5893         }
5894
5895         /* Reset and init BRB, PRS, NIG */
5896         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5897         msleep(50);
5898         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5899         msleep(50);
5900         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5902 #ifndef BCM_CNIC
5903         /* set NIC mode */
5904         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5905 #endif
5906
5907         /* Enable inputs of parser neighbor blocks */
5908         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5911         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5912
5913         DP(NETIF_MSG_HW, "done\n");
5914
5915         return 0; /* OK */
5916 }
5917
5918 static void enable_blocks_attention(struct bnx2x *bp)
5919 {
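             /* Writing 0 to a block's INT_MASK register unmasks all of its
              * attention bits; set bits stay masked (see the PBF mask below). */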
5920         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5921         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5922         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5923         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5924         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5925         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5926         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5927         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5928         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5929 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5930 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5931         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5932         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5933         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5934 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5935 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5936         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5937         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5938         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5939         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5940 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5941 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5942         if (CHIP_REV_IS_FPGA(bp))
5943                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5944         else
5945                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5946         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5947         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5948         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5949 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5950 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5951         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5952         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5953 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5954         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5955 }
5956
5958 static void bnx2x_reset_common(struct bnx2x *bp)
5959 {
5960         /* reset_common */
5961         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5962                0xd3ffff7f);
5963         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5964 }
5965
5966 static void bnx2x_init_pxp(struct bnx2x *bp)
5967 {
5968         u16 devctl;
5969         int r_order, w_order;
5970
5971         pci_read_config_word(bp->pdev,
5972                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975         if (bp->mrrs == -1)
5976                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977         else {
5978                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979                 r_order = bp->mrrs;
5980         }
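             /* Both orders use the PCIe DEVCTL encoding: a field value of n
              * means 128 << n bytes (0 = 128B, 1 = 256B, ...). */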
5981
5982         bnx2x_init_pxp_arb(bp, r_order, w_order);
5983 }
5984
5985 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986 {
5987         u32 val;
5988         u8 port;
5989         u8 is_required = 0;
5990
5991         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992               SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995                 is_required = 1;
5996
5997         /*
5998          * The fan failure mechanism is usually related to the PHY type since
5999          * the power consumption of the board is affected by the PHY. Currently,
6000          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001          */
6002         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003                 for (port = PORT_0; port < PORT_MAX; port++) {
6004                         u32 phy_type =
6005                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006                                          external_phy_config) &
6007                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008                         is_required |=
6009                                 ((phy_type ==
6010                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011                                  (phy_type ==
6012                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013                                  (phy_type ==
6014                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015                 }
6016
6017         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019         if (is_required == 0)
6020                 return;
6021
6022         /* Fan failure is indicated by SPIO 5 */
6023         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026         /* set to active low mode */
6027         val = REG_RD(bp, MISC_REG_SPIO_INT);
6028         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030         REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032         /* enable interrupt to signal the IGU */
6033         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034         val |= (1 << MISC_REGISTERS_SPIO_5);
6035         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036 }
6037
6038 static int bnx2x_init_common(struct bnx2x *bp)
6039 {
6040         u32 val, i;
6041 #ifdef BCM_CNIC
6042         u32 wb_write[2];
6043 #endif
6044
6045         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6046
6047         bnx2x_reset_common(bp);
6048         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6050
6051         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6052         if (CHIP_IS_E1H(bp))
6053                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6054
6055         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6056         msleep(30);
6057         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6058
6059         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6060         if (CHIP_IS_E1(bp)) {
6061                 /* enable HW interrupt from PXP on USDM overflow
6062                    bit 16 on INT_MASK_0 */
6063                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6064         }
6065
6066         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6067         bnx2x_init_pxp(bp);
6068
6069 #ifdef __BIG_ENDIAN
6070         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6075         /* make sure this value is 0 */
6076         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6077
6078 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6083 #endif
6084
6085         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6086 #ifdef BCM_CNIC
6087         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6090 #endif
6091
6092         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6094
6095         /* let the HW do its magic ... */
6096         msleep(100);
6097         /* finish PXP init */
6098         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6099         if (val != 1) {
6100                 BNX2X_ERR("PXP2 CFG failed\n");
6101                 return -EBUSY;
6102         }
6103         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6104         if (val != 1) {
6105                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6106                 return -EBUSY;
6107         }
6108
6109         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6111
6112         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6113
6114         /* clean the DMAE memory */
6115         bp->dmae_ready = 1;
6116         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6117
6118         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6122
6123         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6127
6128         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6129
6130 #ifdef BCM_CNIC
6131         wb_write[0] = 0;
6132         wb_write[1] = 0;
6133         for (i = 0; i < 64; i++) {
6134                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6136
6137                 if (CHIP_IS_E1H(bp)) {
6138                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140                                           wb_write, 2);
6141                 }
6142         }
6143 #endif
6144         /* soft reset pulse */
6145         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6147
6148 #ifdef BCM_CNIC
6149         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6150 #endif
6151
6152         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6153         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154         if (!CHIP_REV_IS_SLOW(bp)) {
6155                 /* enable hw interrupt from doorbell Q */
6156                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6157         }
6158
6159         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6161         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6162 #ifndef BCM_CNIC
6163         /* set NIC mode */
6164         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6165 #endif
6166         if (CHIP_IS_E1H(bp))
6167                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6168
6169         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6173
6174         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6178
6179         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6183
6184         /* sync semi rtc */
6185         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6186                0x80000000);
6187         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6188                0x80000000);
6189
6190         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6193
6194         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196                 REG_WR(bp, i, 0xc0cac01a);
6197                 /* TODO: replace with something meaningful */
6198         }
6199         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6200 #ifdef BCM_CNIC
6201         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211 #endif
6212         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6213
6214         if (sizeof(union cdu_context) != 1024)
6215                 /* we currently assume that a context is 1024 bytes */
6216                 printk(KERN_ALERT PFX "please adjust the size of"
6217                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6218
6219         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6220         val = (4 << 24) + (0 << 12) + 1024;
6221         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6222
6223         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6224         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6225         /* enable context validation interrupt from CFC */
6226         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6227
6228         /* set the thresholds to prevent CFC/CDU race */
6229         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6230
6231         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6233
6234         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6235         /* Reset PCIE errors for debug */
6236         REG_WR(bp, 0x2814, 0xffffffff);
6237         REG_WR(bp, 0x3820, 0xffffffff);
6238
6239         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6240         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6241         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6242         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6243
6244         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6245         if (CHIP_IS_E1H(bp)) {
6246                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6248         }
6249
6250         if (CHIP_REV_IS_SLOW(bp))
6251                 msleep(200);
6252
6253         /* finish CFC init */
6254         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6255         if (val != 1) {
6256                 BNX2X_ERR("CFC LL_INIT failed\n");
6257                 return -EBUSY;
6258         }
6259         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6260         if (val != 1) {
6261                 BNX2X_ERR("CFC AC_INIT failed\n");
6262                 return -EBUSY;
6263         }
6264         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6265         if (val != 1) {
6266                 BNX2X_ERR("CFC CAM_INIT failed\n");
6267                 return -EBUSY;
6268         }
6269         REG_WR(bp, CFC_REG_DEBUG0, 0);
6270
6271         /* read NIG statistic
6272            to see if this is the first load since power-up */
6273         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274         val = *bnx2x_sp(bp, wb_data[0]);
6275
6276         /* do internal memory self test */
6277         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278                 BNX2X_ERR("internal mem self test failed\n");
6279                 return -EBUSY;
6280         }
6281
6282         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6283         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6286         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6287                 bp->port.need_hw_lock = 1;
6288                 break;
6289
6290         default:
6291                 break;
6292         }
6293
6294         bnx2x_setup_fan_failure_detection(bp);
6295
6296         /* clear PXP2 attentions */
6297         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6298
6299         enable_blocks_attention(bp);
6300
6301         if (!BP_NOMCP(bp)) {
6302                 bnx2x_acquire_phy_lock(bp);
6303                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304                 bnx2x_release_phy_lock(bp);
6305         } else
6306                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6307
6308         return 0;
6309 }
6310
6311 static int bnx2x_init_port(struct bnx2x *bp)
6312 {
6313         int port = BP_PORT(bp);
6314         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6315         u32 low, high;
6316         u32 val;
6317
6318         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6319
6320         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6321
6322         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6323         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6324
6325         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6328         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6329
6330 #ifdef BCM_CNIC
6331         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6332
6333         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6334         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6336 #endif
6337         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6338
6339         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6340         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341                 /* no pause for emulation and FPGA */
6342                 low = 0;
6343                 high = 513;
6344         } else {
6345                 if (IS_E1HMF(bp))
6346                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347                 else if (bp->dev->mtu > 4096) {
6348                         if (bp->flags & ONE_PORT_FLAG)
6349                                 low = 160;
6350                         else {
6351                                 val = bp->dev->mtu;
6352                                 /* (24*1024 + val*4)/256 */
6353                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354                         }
6355                 } else
6356                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357                 high = low + 56;        /* 14*1024/256 */
6358         }
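             /* Thresholds are in 256-byte BRB blocks: 96 blocks cover the 24KB
              * base, one block is added per 64 bytes of MTU, and the 56-block
              * low-to-high gap gives 14KB of hysteresis. */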
6359         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6363         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6364
6365         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6366         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6367         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6368         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6369
6370         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6374
6375         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6376         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6377
6378         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6379
6380         /* configure PBF to work without PAUSE mtu 9000 */
6381         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6382
6383         /* update threshold */
6384         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6385         /* update init credit */
6386         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6387
6388         /* probe changes */
6389         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6390         msleep(5);
6391         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6392
6393 #ifdef BCM_CNIC
6394         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6395 #endif
6396         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6397         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6398
6399         if (CHIP_IS_E1(bp)) {
6400                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402         }
6403         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6404
6405         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6406         /* init aeu_mask_attn_func_0/1:
6407          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409          *             bits 4-7 are used for "per vn group attention" */
6410         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
6413         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6414         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6415         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6416         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6417         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6418
6419         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6420
6421         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423         if (CHIP_IS_E1H(bp)) {
6424                 /* 0x2 disable e1hov, 0x1 enable */
6425                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
6428                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6429                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6430                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6433         }
6434
6435         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6436         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6437
6438         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6439         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440                 {
6441                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446                 /* The GPIO should be swapped if the swap register is
6447                    set and active */
6448                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451                 /* Select function upon port-swap configuration */
6452                 if (port == 0) {
6453                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454                         aeu_gpio_mask = (swap_val && swap_override) ?
6455                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457                 } else {
6458                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459                         aeu_gpio_mask = (swap_val && swap_override) ?
6460                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462                 }
6463                 val = REG_RD(bp, offset);
6464                 /* add GPIO3 to group */
6465                 val |= aeu_gpio_mask;
6466                 REG_WR(bp, offset, val);
6467                 }
6468                 break;
6469
6470         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6471         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6472                 /* add SPIO 5 to group 0 */
6473                 {
6474                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476                 val = REG_RD(bp, reg_addr);
6477                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6478                 REG_WR(bp, reg_addr, val);
6479                 }
6480                 break;
6481
6482         default:
6483                 break;
6484         }
6485
6486         bnx2x__link_reset(bp);
6487
6488         return 0;
6489 }
6490
6491 #define ILT_PER_FUNC            (768/2)
6492 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6493 /* The physical address is shifted right by 12 bits and a valid bit
6494    is set at bit 52 (the 53rd bit); since this is a wide register,
6495    the value is split into two 32-bit writes.
6496  */
6498 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6499 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6500 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6501 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
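     /* Worked example: for addr = 0x1234567000, ONCHIP_ADDR1() yields
      * 0x01234567 (address bits 43-12) and ONCHIP_ADDR2() yields 0x00100000
      * (just the valid bit, since address bits 63-44 are zero here). */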
6502
6503 #ifdef BCM_CNIC
6504 #define CNIC_ILT_LINES          127
6505 #define CNIC_CTX_PER_ILT        16
6506 #else
6507 #define CNIC_ILT_LINES          0
6508 #endif
6509
6510 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511 {
6512         int reg;
6513
6514         if (CHIP_IS_E1H(bp))
6515                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516         else /* E1 */
6517                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520 }
6521
6522 static int bnx2x_init_func(struct bnx2x *bp)
6523 {
6524         int port = BP_PORT(bp);
6525         int func = BP_FUNC(bp);
6526         u32 addr, val;
6527         int i;
6528
6529         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6530
6531         /* set MSI reconfigure capability */
6532         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6533         val = REG_RD(bp, addr);
6534         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6535         REG_WR(bp, addr, val);
6536
6537         i = FUNC_ILT_BASE(func);
6538
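             /* The function's ILT lines start with its CDU context page(s);
              * when CNIC is compiled in, the timers, QM and SRC table pages
              * follow on the subsequent lines. */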
6539         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6540         if (CHIP_IS_E1H(bp)) {
6541                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6542                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6543         } else /* E1 */
6544                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6545                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6546
6547 #ifdef BCM_CNIC
6548         i += 1 + CNIC_ILT_LINES;
6549         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550         if (CHIP_IS_E1(bp))
6551                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552         else {
6553                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6555         }
6556
6557         i++;
6558         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559         if (CHIP_IS_E1(bp))
6560                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561         else {
6562                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6564         }
6565
6566         i++;
6567         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568         if (CHIP_IS_E1(bp))
6569                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570         else {
6571                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6573         }
6574
6575         /* tell the searcher where the T2 table is */
6576         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6577
6578         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6580
6581         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6584
6585         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586 #endif
6587
6588         if (CHIP_IS_E1H(bp)) {
6589                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6590                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6591                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6592                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6593                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6594                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6595                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6596                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6597                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6598
6599                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6600                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6601         }
6602
6603         /* HC init per function */
6604         if (CHIP_IS_E1H(bp)) {
6605                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6606
6607                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6608                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6609         }
6610         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6611
6612         /* Reset PCIE errors for debug */
6613         REG_WR(bp, 0x2114, 0xffffffff);
6614         REG_WR(bp, 0x2120, 0xffffffff);
6615
6616         return 0;
6617 }
6618
6619 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6620 {
6621         int i, rc = 0;
6622
6623         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6624            BP_FUNC(bp), load_code);
6625
6626         bp->dmae_ready = 0;
6627         mutex_init(&bp->dmae_mutex);
6628         rc = bnx2x_gunzip_init(bp);
6629         if (rc)
6630                 return rc;
6631
6632         switch (load_code) {
6633         case FW_MSG_CODE_DRV_LOAD_COMMON:
6634                 rc = bnx2x_init_common(bp);
6635                 if (rc)
6636                         goto init_hw_err;
6637                 /* no break */
6638
6639         case FW_MSG_CODE_DRV_LOAD_PORT:
6640                 bp->dmae_ready = 1;
6641                 rc = bnx2x_init_port(bp);
6642                 if (rc)
6643                         goto init_hw_err;
6644                 /* no break */
6645
6646         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647                 bp->dmae_ready = 1;
6648                 rc = bnx2x_init_func(bp);
6649                 if (rc)
6650                         goto init_hw_err;
6651                 break;
6652
6653         default:
6654                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655                 break;
6656         }
6657
6658         if (!BP_NOMCP(bp)) {
6659                 int func = BP_FUNC(bp);
6660
6661                 bp->fw_drv_pulse_wr_seq =
6662                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6663                                  DRV_PULSE_SEQ_MASK);
6664                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6665         }
6666
6667         /* this needs to be done before gunzip end */
6668         bnx2x_zero_def_sb(bp);
6669         for_each_queue(bp, i)
6670                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6671 #ifdef BCM_CNIC
6672         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673 #endif
6674
6675 init_hw_err:
6676         bnx2x_gunzip_end(bp);
6677
6678         return rc;
6679 }
6680
6681 static void bnx2x_free_mem(struct bnx2x *bp)
6682 {
6683
6684 #define BNX2X_PCI_FREE(x, y, size) \
6685         do { \
6686                 if (x) { \
6687                         pci_free_consistent(bp->pdev, size, x, y); \
6688                         x = NULL; \
6689                         y = 0; \
6690                 } \
6691         } while (0)
6692
6693 #define BNX2X_FREE(x) \
6694         do { \
6695                 if (x) { \
6696                         vfree(x); \
6697                         x = NULL; \
6698                 } \
6699         } while (0)
6700
6701         int i;
6702
6703         /* fastpath */
6704         /* Common */
6705         for_each_queue(bp, i) {
6706
6707                 /* status blocks */
6708                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709                                bnx2x_fp(bp, i, status_blk_mapping),
6710                                sizeof(struct host_status_block));
6711         }
6712         /* Rx */
6713         for_each_queue(bp, i) {
6714
6715                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6716                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718                                bnx2x_fp(bp, i, rx_desc_mapping),
6719                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722                                bnx2x_fp(bp, i, rx_comp_mapping),
6723                                sizeof(struct eth_fast_path_rx_cqe) *
6724                                NUM_RCQ_BD);
6725
6726                 /* SGE ring */
6727                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6728                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729                                bnx2x_fp(bp, i, rx_sge_mapping),
6730                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731         }
6732         /* Tx */
6733         for_each_queue(bp, i) {
6734
6735                 /* fastpath tx rings: tx_buf tx_desc */
6736                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738                                bnx2x_fp(bp, i, tx_desc_mapping),
6739                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6740         }
6741         /* end of fastpath */
6742
6743         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6744                        sizeof(struct host_def_status_block));
6745
6746         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6747                        sizeof(struct bnx2x_slowpath));
6748
6749 #ifdef BCM_CNIC
6750         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6754         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755                        sizeof(struct host_status_block));
6756 #endif
6757         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6758
6759 #undef BNX2X_PCI_FREE
6760 #undef BNX2X_FREE
6761 }
6762
6763 static int bnx2x_alloc_mem(struct bnx2x *bp)
6764 {
6765
6766 #define BNX2X_PCI_ALLOC(x, y, size) \
6767         do { \
6768                 x = pci_alloc_consistent(bp->pdev, size, y); \
6769                 if (x == NULL) \
6770                         goto alloc_mem_err; \
6771                 memset(x, 0, size); \
6772         } while (0)
6773
6774 #define BNX2X_ALLOC(x, size) \
6775         do { \
6776                 x = vmalloc(size); \
6777                 if (x == NULL) \
6778                         goto alloc_mem_err; \
6779                 memset(x, 0, size); \
6780         } while (0)
6781
6782         int i;
6783
6784         /* fastpath */
6785         /* Common */
6786         for_each_queue(bp, i) {
6787                 bnx2x_fp(bp, i, bp) = bp;
6788
6789                 /* status blocks */
6790                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6791                                 &bnx2x_fp(bp, i, status_blk_mapping),
6792                                 sizeof(struct host_status_block));
6793         }
6794         /* Rx */
6795         for_each_queue(bp, i) {
6796
6797                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6798                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6799                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6800                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6801                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6802                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6803
6804                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6805                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6806                                 sizeof(struct eth_fast_path_rx_cqe) *
6807                                 NUM_RCQ_BD);
6808
6809                 /* SGE ring */
6810                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6811                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6812                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6813                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6814                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6815         }
6816         /* Tx */
6817         for_each_queue(bp, i) {
6818
6819                 /* fastpath tx rings: tx_buf tx_desc */
6820                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6821                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6822                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6823                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6824                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6825         }
6826         /* end of fastpath */
6827
6828         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6829                         sizeof(struct host_def_status_block));
6830
6831         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6832                         sizeof(struct bnx2x_slowpath));
6833
6834 #ifdef BCM_CNIC
6835         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6836
6837         /* allocate searcher T2 table
6838            we allocate 1/4 of alloc num for T2
6839            (which is not entered into the ILT) */
6840         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6841
6842         /* Initialize T2 (for 1024 connections) */
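        /* The last 8 bytes (offset 56) of each 64-byte entry hold the
         * physical address of the next entry, chaining the table. */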
6843         for (i = 0; i < 16*1024; i += 64)
6844                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6845
6846         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6847         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6848
6849         /* QM queues (128*MAX_CONN) */
6850         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6851
6852         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853                         sizeof(struct host_status_block));
6854 #endif
6855
6856         /* Slow path ring */
6857         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6858
6859         return 0;
6860
6861 alloc_mem_err:
6862         bnx2x_free_mem(bp);
6863         return -ENOMEM;
6864
6865 #undef BNX2X_PCI_ALLOC
6866 #undef BNX2X_ALLOC
6867 }
6868
6869 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870 {
6871         int i;
6872
6873         for_each_queue(bp, i) {
6874                 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876                 u16 bd_cons = fp->tx_bd_cons;
6877                 u16 sw_prod = fp->tx_pkt_prod;
6878                 u16 sw_cons = fp->tx_pkt_cons;
6879
6880                 while (sw_cons != sw_prod) {
6881                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882                         sw_cons++;
6883                 }
6884         }
6885 }
6886
6887 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888 {
6889         int i, j;
6890
6891         for_each_queue(bp, j) {
6892                 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
6894                 for (i = 0; i < NUM_RX_BD; i++) {
6895                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896                         struct sk_buff *skb = rx_buf->skb;
6897
6898                         if (skb == NULL)
6899                                 continue;
6900
6901                         pci_unmap_single(bp->pdev,
6902                                          pci_unmap_addr(rx_buf, mapping),
6903                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6904
6905                         rx_buf->skb = NULL;
6906                         dev_kfree_skb(skb);
6907                 }
6908                 if (!fp->disable_tpa)
6909                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6911                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6912         }
6913 }
6914
6915 static void bnx2x_free_skbs(struct bnx2x *bp)
6916 {
6917         bnx2x_free_tx_skbs(bp);
6918         bnx2x_free_rx_skbs(bp);
6919 }
6920
6921 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922 {
6923         int i, offset = 1;
6924
6925         free_irq(bp->msix_table[0].vector, bp->dev);
6926         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6927            bp->msix_table[0].vector);
6928
6929 #ifdef BCM_CNIC
6930         offset++;
6931 #endif
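        /* vector 0 is the slowpath interrupt (with CNIC taking vector 1
         * when BCM_CNIC is set); fastpath vectors start at 'offset' */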
6932         for_each_queue(bp, i) {
6933                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6934                    "state %x\n", i, bp->msix_table[i + offset].vector,
6935                    bnx2x_fp(bp, i, state));
6936
6937                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6938         }
6939 }
6940
6941 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
6942 {
6943         if (bp->flags & USING_MSIX_FLAG) {
6944                 if (!disable_only)
6945                         bnx2x_free_msix_irqs(bp);
6946                 pci_disable_msix(bp->pdev);
6947                 bp->flags &= ~USING_MSIX_FLAG;
6948
6949         } else if (bp->flags & USING_MSI_FLAG) {
6950                 if (!disable_only)
6951                         free_irq(bp->pdev->irq, bp->dev);
6952                 pci_disable_msi(bp->pdev);
6953                 bp->flags &= ~USING_MSI_FLAG;
6954
6955         } else if (!disable_only)
6956                 free_irq(bp->pdev->irq, bp->dev);
6957 }
6958
6959 static int bnx2x_enable_msix(struct bnx2x *bp)
6960 {
6961         int i, rc, offset = 1;
6962         int igu_vec = 0;
6963
6964         bp->msix_table[0].entry = igu_vec;
6965         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6966
6967 #ifdef BCM_CNIC
6968         igu_vec = BP_L_ID(bp) + offset;
6969         bp->msix_table[1].entry = igu_vec;
6970         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6971         offset++;
6972 #endif
6973         for_each_queue(bp, i) {
6974                 igu_vec = BP_L_ID(bp) + offset + i;
6975                 bp->msix_table[i + offset].entry = igu_vec;
6976                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6977                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6978         }
6979
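        /* Illustrative layout for a 4-queue, non-CNIC build: entry 0 is
         * the slowpath, entries 1..4 map to fastpath queues 0..3. */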
6980         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6981                              BNX2X_NUM_QUEUES(bp) + offset);
6982         if (rc) {
6983                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6984                 return rc;
6985         }
6986
6987         bp->flags |= USING_MSIX_FLAG;
6988
6989         return 0;
6990 }
6991
6992 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6993 {
6994         int i, rc, offset = 1;
6995
6996         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6997                          bp->dev->name, bp->dev);
6998         if (rc) {
6999                 BNX2X_ERR("request sp irq failed\n");
7000                 return -EBUSY;
7001         }
7002
7003 #ifdef BCM_CNIC
7004         offset++;
7005 #endif
7006         for_each_queue(bp, i) {
7007                 struct bnx2x_fastpath *fp = &bp->fp[i];
7008                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7009                          bp->dev->name, i);
7010
7011                 rc = request_irq(bp->msix_table[i + offset].vector,
7012                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7013                 if (rc) {
7014                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7015                         bnx2x_free_msix_irqs(bp);
7016                         return -EBUSY;
7017                 }
7018
7019                 fp->state = BNX2X_FP_STATE_IRQ;
7020         }
7021
7022         i = BNX2X_NUM_QUEUES(bp);
7023         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7024                " ... fp[%d] %d\n",
7025                bp->dev->name, bp->msix_table[0].vector,
7026                0, bp->msix_table[offset].vector,
7027                i - 1, bp->msix_table[offset + i - 1].vector);
7028
7029         return 0;
7030 }
7031
7032 static int bnx2x_enable_msi(struct bnx2x *bp)
7033 {
7034         int rc;
7035
7036         rc = pci_enable_msi(bp->pdev);
7037         if (rc) {
7038                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7039                 return -1;
7040         }
7041         bp->flags |= USING_MSI_FLAG;
7042
7043         return 0;
7044 }
7045
7046 static int bnx2x_req_irq(struct bnx2x *bp)
7047 {
7048         unsigned long flags;
7049         int rc;
7050
7051         if (bp->flags & USING_MSI_FLAG)
7052                 flags = 0;
7053         else
7054                 flags = IRQF_SHARED;
7055
7056         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7057                          bp->dev->name, bp->dev);
7058         if (!rc)
7059                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7060
7061         return rc;
7062 }
7063
7064 static void bnx2x_napi_enable(struct bnx2x *bp)
7065 {
7066         int i;
7067
7068         for_each_queue(bp, i)
7069                 napi_enable(&bnx2x_fp(bp, i, napi));
7070 }
7071
7072 static void bnx2x_napi_disable(struct bnx2x *bp)
7073 {
7074         int i;
7075
7076         for_each_queue(bp, i)
7077                 napi_disable(&bnx2x_fp(bp, i, napi));
7078 }
7079
7080 static void bnx2x_netif_start(struct bnx2x *bp)
7081 {
7082         int intr_sem;
7083
7084         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7085         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
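        /* atomic_dec_and_test() returns true only when intr_sem drops to
         * zero, i.e. the last disabler has released interrupts */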
7086
7087         if (intr_sem) {
7088                 if (netif_running(bp->dev)) {
7089                         bnx2x_napi_enable(bp);
7090                         bnx2x_int_enable(bp);
7091                         if (bp->state == BNX2X_STATE_OPEN)
7092                                 netif_tx_wake_all_queues(bp->dev);
7093                 }
7094         }
7095 }
7096
7097 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7098 {
7099         bnx2x_int_disable_sync(bp, disable_hw);
7100         bnx2x_napi_disable(bp);
7101         netif_tx_disable(bp->dev);
7102 }
7103
7104 /*
7105  * Init service functions
7106  */
7107
7108 /**
7109  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7110  *
7111  * @param bp driver descriptor
7112  * @param set set or clear an entry (1 or 0)
7113  * @param mac pointer to a buffer containing a MAC
7114  * @param cl_bit_vec bit vector of clients to register a MAC for
7115  * @param cam_offset offset in a CAM to use
7116  * @param with_bcast set broadcast MAC as well
7117  */
7118 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7119                                       u32 cl_bit_vec, u8 cam_offset,
7120                                       u8 with_bcast)
7121 {
7122         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7123         int port = BP_PORT(bp);
7124
7125         /* CAM allocation
7126          * unicasts 0-31:port0 32-63:port1
7127          * multicast 64-127:port0 128-191:port1
7128          */
7129         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7130         config->hdr.offset = cam_offset;
7131         config->hdr.client_id = 0xff;
7132         config->hdr.reserved1 = 0;
7133
7134         /* primary MAC */
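        /* the MAC is written 16 bits at a time; swab16() swaps each pair
         * into the byte order the CAM expects */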
7135         config->config_table[0].cam_entry.msb_mac_addr =
7136                                         swab16(*(u16 *)&mac[0]);
7137         config->config_table[0].cam_entry.middle_mac_addr =
7138                                         swab16(*(u16 *)&mac[2]);
7139         config->config_table[0].cam_entry.lsb_mac_addr =
7140                                         swab16(*(u16 *)&mac[4]);
7141         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7142         if (set)
7143                 config->config_table[0].target_table_entry.flags = 0;
7144         else
7145                 CAM_INVALIDATE(config->config_table[0]);
7146         config->config_table[0].target_table_entry.clients_bit_vector =
7147                                                 cpu_to_le32(cl_bit_vec);
7148         config->config_table[0].target_table_entry.vlan_id = 0;
7149
7150         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7151            (set ? "setting" : "clearing"),
7152            config->config_table[0].cam_entry.msb_mac_addr,
7153            config->config_table[0].cam_entry.middle_mac_addr,
7154            config->config_table[0].cam_entry.lsb_mac_addr);
7155
7156         /* broadcast */
7157         if (with_bcast) {
7158                 config->config_table[1].cam_entry.msb_mac_addr =
7159                         cpu_to_le16(0xffff);
7160                 config->config_table[1].cam_entry.middle_mac_addr =
7161                         cpu_to_le16(0xffff);
7162                 config->config_table[1].cam_entry.lsb_mac_addr =
7163                         cpu_to_le16(0xffff);
7164                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7165                 if (set)
7166                         config->config_table[1].target_table_entry.flags =
7167                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7168                 else
7169                         CAM_INVALIDATE(config->config_table[1]);
7170                 config->config_table[1].target_table_entry.clients_bit_vector =
7171                                                         cpu_to_le32(cl_bit_vec);
7172                 config->config_table[1].target_table_entry.vlan_id = 0;
7173         }
7174
7175         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7176                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7177                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7178 }
7179
7180 /**
7181  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7182  *
7183  * @param bp driver descriptor
7184  * @param set set or clear an entry (1 or 0)
7185  * @param mac pointer to a buffer containing a MAC
7186  * @param cl_bit_vec bit vector of clients to register a MAC for
7187  * @param cam_offset offset in a CAM to use
7188  */
7189 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7190                                        u32 cl_bit_vec, u8 cam_offset)
7191 {
7192         struct mac_configuration_cmd_e1h *config =
7193                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7194
7195         config->hdr.length = 1;
7196         config->hdr.offset = cam_offset;
7197         config->hdr.client_id = 0xff;
7198         config->hdr.reserved1 = 0;
7199
7200         /* primary MAC */
7201         config->config_table[0].msb_mac_addr =
7202                                         swab16(*(u16 *)&mac[0]);
7203         config->config_table[0].middle_mac_addr =
7204                                         swab16(*(u16 *)&mac[2]);
7205         config->config_table[0].lsb_mac_addr =
7206                                         swab16(*(u16 *)&mac[4]);
7207         config->config_table[0].clients_bit_vector =
7208                                         cpu_to_le32(cl_bit_vec);
7209         config->config_table[0].vlan_id = 0;
7210         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7211         if (set)
7212                 config->config_table[0].flags = BP_PORT(bp);
7213         else
7214                 config->config_table[0].flags =
7215                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7216
7217         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7218            (set ? "setting" : "clearing"),
7219            config->config_table[0].msb_mac_addr,
7220            config->config_table[0].middle_mac_addr,
7221            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7222
7223         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7224                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7225                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7226 }
7227
7228 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7229                              int *state_p, int poll)
7230 {
7231         /* can take a while if any port is running */
7232         int cnt = 5000;
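        /* with msleep(1) per iteration this bounds the wait to roughly
         * five seconds (scheduling latency may stretch it) */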
7233
7234         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7235            poll ? "polling" : "waiting", state, idx);
7236
7237         might_sleep();
7238         while (cnt--) {
7239                 if (poll) {
7240                         bnx2x_rx_int(bp->fp, 10);
7241                         /* if the index differs from 0, the reply for
7242                          * some commands will arrive on the
7243                          * non-default queue
7244                          */
7245                         if (idx)
7246                                 bnx2x_rx_int(&bp->fp[idx], 10);
7247                 }
7248
7249                 mb(); /* state is changed by bnx2x_sp_event() */
7250                 if (*state_p == state) {
7251 #ifdef BNX2X_STOP_ON_ERROR
7252                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7253 #endif
7254                         return 0;
7255                 }
7256
7257                 msleep(1);
7258
7259                 if (bp->panic)
7260                         return -EIO;
7261         }
7262
7263         /* timeout! */
7264         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7265                   poll ? "polling" : "waiting", state, idx);
7266 #ifdef BNX2X_STOP_ON_ERROR
7267         bnx2x_panic();
7268 #endif
7269
7270         return -EBUSY;
7271 }
7272
7273 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7274 {
7275         bp->set_mac_pending++;
7276         smp_wmb();
7277
7278         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7279                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7280
7281         /* Wait for a completion */
7282         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7283 }
7284
7285 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7286 {
7287         bp->set_mac_pending++;
7288         smp_wmb();
7289
7290         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7291                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7292                                   1);
7293
7294         /* Wait for a completion */
7295         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7296 }
7297
7298 #ifdef BCM_CNIC
7299 /**
7300  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7301  * MAC(s). This function will wait until the ramrod completion
7302  * returns.
7303  *
7304  * @param bp driver handle
7305  * @param set set or clear the CAM entry
7306  *
7307  * @return 0 if success, -ENODEV if the ramrod doesn't return.
7308  */
7309 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7310 {
7311         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7312
7313         bp->set_mac_pending++;
7314         smp_wmb();
7315
7316         /* Send a SET_MAC ramrod */
7317         if (CHIP_IS_E1(bp))
7318                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7319                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7320                                   1);
7321         else
7322                 /* CAM allocation for E1H
7323                  * unicasts: by func number
7324                  * multicast: 20+FUNC*20, 20 each
7325                  */
7326                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7327                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7328
7329         /* Wait for a completion when setting */
7330         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7331
7332         return 0;
7333 }
7334 #endif
7335
7336 static int bnx2x_setup_leading(struct bnx2x *bp)
7337 {
7338         int rc;
7339
7340         /* reset IGU state */
7341         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7342
7343         /* SETUP ramrod */
7344         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7345
7346         /* Wait for completion */
7347         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7348
7349         return rc;
7350 }
7351
7352 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7353 {
7354         struct bnx2x_fastpath *fp = &bp->fp[index];
7355
7356         /* reset IGU state */
7357         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7358
7359         /* SETUP ramrod */
7360         fp->state = BNX2X_FP_STATE_OPENING;
7361         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7362                       fp->cl_id, 0);
7363
7364         /* Wait for completion */
7365         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7366                                  &(fp->state), 0);
7367 }
7368
7369 static int bnx2x_poll(struct napi_struct *napi, int budget);
7370
7371 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7372 {
7373
7374         switch (bp->multi_mode) {
7375         case ETH_RSS_MODE_DISABLED:
7376                 bp->num_queues = 1;
7377                 break;
7378
7379         case ETH_RSS_MODE_REGULAR:
7380                 if (num_queues)
7381                         bp->num_queues = min_t(u32, num_queues,
7382                                                   BNX2X_MAX_QUEUES(bp));
7383                 else
7384                         bp->num_queues = min_t(u32, num_online_cpus(),
7385                                                   BNX2X_MAX_QUEUES(bp));
7386                 break;
7387
7389         default:
7390                 bp->num_queues = 1;
7391                 break;
7392         }
7393 }
7394
7395 static int bnx2x_set_num_queues(struct bnx2x *bp)
7396 {
7397         int rc = 0;
7398
7399         switch (int_mode) {
7400         case INT_MODE_INTx:
7401         case INT_MODE_MSI:
7402                 bp->num_queues = 1;
7403                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7404                 break;
7405
7406         case INT_MODE_MSIX:
7407         default:
7408                 /* Set number of queues according to bp->multi_mode value */
7409                 bnx2x_set_num_queues_msix(bp);
7410
7411                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7412                    bp->num_queues);
7413
7414                 /* if we can't use MSI-X we only need one fp,
7415                  * so try to enable MSI-X with the requested number of fp's
7416                  * and fall back to MSI or legacy INTx with one fp
7417                  */
7418                 rc = bnx2x_enable_msix(bp);
7419                 if (rc)
7420                         /* failed to enable MSI-X */
7421                         bp->num_queues = 1;
7422                 break;
7423         }
7424         bp->dev->real_num_tx_queues = bp->num_queues;
7425         return rc;
7426 }
7427
7428 #ifdef BCM_CNIC
7429 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7431 #endif
7432
7433 /* must be called with rtnl_lock */
7434 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7435 {
7436         u32 load_code;
7437         int i, rc;
7438
7439 #ifdef BNX2X_STOP_ON_ERROR
7440         if (unlikely(bp->panic))
7441                 return -EPERM;
7442 #endif
7443
7444         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7445
7446         rc = bnx2x_set_num_queues(bp);
7447
7448         if (bnx2x_alloc_mem(bp)) {
7449                 bnx2x_free_irq(bp, true);
7450                 return -ENOMEM;
7451         }
7452
7453         for_each_queue(bp, i)
7454                 bnx2x_fp(bp, i, disable_tpa) =
7455                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7456
7457         for_each_queue(bp, i)
7458                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7459                                bnx2x_poll, 128);
7460
7461         bnx2x_napi_enable(bp);
7462
7463         if (bp->flags & USING_MSIX_FLAG) {
7464                 rc = bnx2x_req_msix_irqs(bp);
7465                 if (rc) {
7466                         bnx2x_free_irq(bp, true);
7467                         goto load_error1;
7468                 }
7469         } else {
7470                 /* Fall back to INTx if MSI-X could not be enabled due
7471                    to lack of memory (in bnx2x_set_num_queues()) */
7472                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7473                         bnx2x_enable_msi(bp);
7474                 bnx2x_ack_int(bp);
7475                 rc = bnx2x_req_irq(bp);
7476                 if (rc) {
7477                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7478                         bnx2x_free_irq(bp, true);
7479                         goto load_error1;
7480                 }
7481                 if (bp->flags & USING_MSI_FLAG) {
7482                         bp->dev->irq = bp->pdev->irq;
7483                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7484                                bp->dev->name, bp->pdev->irq);
7485                 }
7486         }
7487
7488         /* Send the LOAD_REQUEST command to the MCP.
7489            The response indicates the type of LOAD to perform:
7490            if this is the first port to be initialized,
7491            the common blocks must be initialized as well; otherwise not.
7492         */
7493         if (!BP_NOMCP(bp)) {
7494                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7495                 if (!load_code) {
7496                         BNX2X_ERR("MCP response failure, aborting\n");
7497                         rc = -EBUSY;
7498                         goto load_error2;
7499                 }
7500                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7501                         rc = -EBUSY; /* other port in diagnostic mode */
7502                         goto load_error2;
7503                 }
7504
7505         } else {
7506                 int port = BP_PORT(bp);
7507
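                /* Without an MCP the driver keeps its own bookkeeping:
                 * load_count[0] counts all loaded functions and
                 * load_count[1 + port] counts them per port; the first
                 * load on the chip is COMMON, the first on a port is PORT.
                 */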
7508                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7509                    load_count[0], load_count[1], load_count[2]);
7510                 load_count[0]++;
7511                 load_count[1 + port]++;
7512                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7513                    load_count[0], load_count[1], load_count[2]);
7514                 if (load_count[0] == 1)
7515                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7516                 else if (load_count[1 + port] == 1)
7517                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7518                 else
7519                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7520         }
7521
7522         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7523             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7524                 bp->port.pmf = 1;
7525         else
7526                 bp->port.pmf = 0;
7527         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7528
7529         /* Initialize HW */
7530         rc = bnx2x_init_hw(bp, load_code);
7531         if (rc) {
7532                 BNX2X_ERR("HW init failed, aborting\n");
7533                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7534                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7535                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7536                 goto load_error2;
7537         }
7538
7539         /* Setup NIC internals and enable interrupts */
7540         bnx2x_nic_init(bp, load_code);
7541
7542         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7543             (bp->common.shmem2_base))
7544                 SHMEM2_WR(bp, dcc_support,
7545                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7546                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7547
7548         /* Send LOAD_DONE command to MCP */
7549         if (!BP_NOMCP(bp)) {
7550                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7551                 if (!load_code) {
7552                         BNX2X_ERR("MCP response failure, aborting\n");
7553                         rc = -EBUSY;
7554                         goto load_error3;
7555                 }
7556         }
7557
7558         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7559
7560         rc = bnx2x_setup_leading(bp);
7561         if (rc) {
7562                 BNX2X_ERR("Setup leading failed!\n");
7563 #ifndef BNX2X_STOP_ON_ERROR
7564                 goto load_error3;
7565 #else
7566                 bp->panic = 1;
7567                 return -EBUSY;
7568 #endif
7569         }
7570
7571         if (CHIP_IS_E1H(bp))
7572                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7573                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7574                         bp->flags |= MF_FUNC_DIS;
7575                 }
7576
7577         if (bp->state == BNX2X_STATE_OPEN) {
7578 #ifdef BCM_CNIC
7579                 /* Enable Timer scan */
7580                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7581 #endif
7582                 for_each_nondefault_queue(bp, i) {
7583                         rc = bnx2x_setup_multi(bp, i);
7584                         if (rc)
7585 #ifdef BCM_CNIC
7586                                 goto load_error4;
7587 #else
7588                                 goto load_error3;
7589 #endif
7590                 }
7591
7592                 if (CHIP_IS_E1(bp))
7593                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7594                 else
7595                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7596 #ifdef BCM_CNIC
7597                 /* Set iSCSI L2 MAC */
7598                 mutex_lock(&bp->cnic_mutex);
7599                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7600                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7601                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7602                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7603                                       CNIC_SB_ID(bp));
7604                 }
7605                 mutex_unlock(&bp->cnic_mutex);
7606 #endif
7607         }
7608
7609         if (bp->port.pmf)
7610                 bnx2x_initial_phy_init(bp, load_mode);
7611
7612         /* Start fast path */
7613         switch (load_mode) {
7614         case LOAD_NORMAL:
7615                 if (bp->state == BNX2X_STATE_OPEN) {
7616                         /* Tx queues should only be re-enabled */
7617                         netif_tx_wake_all_queues(bp->dev);
7618                 }
7619                 /* Initialize the receive filter. */
7620                 bnx2x_set_rx_mode(bp->dev);
7621                 break;
7622
7623         case LOAD_OPEN:
7624                 netif_tx_start_all_queues(bp->dev);
7625                 if (bp->state != BNX2X_STATE_OPEN)
7626                         netif_tx_disable(bp->dev);
7627                 /* Initialize the receive filter. */
7628                 bnx2x_set_rx_mode(bp->dev);
7629                 break;
7630
7631         case LOAD_DIAG:
7632                 /* Initialize the receive filter. */
7633                 bnx2x_set_rx_mode(bp->dev);
7634                 bp->state = BNX2X_STATE_DIAG;
7635                 break;
7636
7637         default:
7638                 break;
7639         }
7640
7641         if (!bp->port.pmf)
7642                 bnx2x__link_status_update(bp);
7643
7644         /* start the timer */
7645         mod_timer(&bp->timer, jiffies + bp->current_interval);
7646
7647 #ifdef BCM_CNIC
7648         bnx2x_setup_cnic_irq_info(bp);
7649         if (bp->state == BNX2X_STATE_OPEN)
7650                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7651 #endif
7652
7653         return 0;
7654
7655 #ifdef BCM_CNIC
7656 load_error4:
7657         /* Disable Timer scan */
7658         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7659 #endif
7660 load_error3:
7661         bnx2x_int_disable_sync(bp, 1);
7662         if (!BP_NOMCP(bp)) {
7663                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7664                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7665         }
7666         bp->port.pmf = 0;
7667         /* Free SKBs, SGEs, TPA pool and driver internals */
7668         bnx2x_free_skbs(bp);
7669         for_each_queue(bp, i)
7670                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7671 load_error2:
7672         /* Release IRQs */
7673         bnx2x_free_irq(bp, false);
7674 load_error1:
7675         bnx2x_napi_disable(bp);
7676         for_each_queue(bp, i)
7677                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7678         bnx2x_free_mem(bp);
7679
7680         return rc;
7681 }
7682
7683 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7684 {
7685         struct bnx2x_fastpath *fp = &bp->fp[index];
7686         int rc;
7687
7688         /* halt the connection */
7689         fp->state = BNX2X_FP_STATE_HALTING;
7690         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7691
7692         /* Wait for completion */
7693         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7694                                &(fp->state), 1);
7695         if (rc) /* timeout */
7696                 return rc;
7697
7698         /* delete cfc entry */
7699         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7700
7701         /* Wait for completion */
7702         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7703                                &(fp->state), 1);
7704         return rc;
7705 }
7706
7707 static int bnx2x_stop_leading(struct bnx2x *bp)
7708 {
7709         __le16 dsb_sp_prod_idx;
7710         /* if the other port is handling traffic,
7711            this can take a lot of time */
7712         int cnt = 500;
7713         int rc;
7714
7715         might_sleep();
7716
7717         /* Send HALT ramrod */
7718         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7719         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7720
7721         /* Wait for completion */
7722         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7723                                &(bp->fp[0].state), 1);
7724         if (rc) /* timeout */
7725                 return rc;
7726
7727         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7728
7729         /* Send PORT_DELETE ramrod */
7730         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7731
7732         /* Wait for the completion to arrive on the default status block;
7733            we are going to reset the chip anyway,
7734            so there is not much to do if this times out
7735          */
7736         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7737                 if (!cnt) {
7738                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7739                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7740                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7741 #ifdef BNX2X_STOP_ON_ERROR
7742                         bnx2x_panic();
7743 #endif
7744                         rc = -EBUSY;
7745                         break;
7746                 }
7747                 cnt--;
7748                 msleep(1);
7749                 rmb(); /* Refresh the dsb_sp_prod */
7750         }
7751         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7752         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7753
7754         return rc;
7755 }
7756
7757 static void bnx2x_reset_func(struct bnx2x *bp)
7758 {
7759         int port = BP_PORT(bp);
7760         int func = BP_FUNC(bp);
7761         int base, i;
7762
7763         /* Configure IGU */
7764         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7765         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7766
7767 #ifdef BCM_CNIC
7768         /* Disable Timer scan */
7769         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7770         /*
7771          * Wait for at least 10 ms and up to 2 seconds for the timer scan to
7772          * complete
7773          */
7774         for (i = 0; i < 200; i++) {
7775                 msleep(10);
7776                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7777                         break;
7778         }
7779 #endif
7780         /* Clear ILT */
7781         base = FUNC_ILT_BASE(func);
7782         for (i = base; i < base + ILT_PER_FUNC; i++)
7783                 bnx2x_ilt_wr(bp, i, 0);
7784 }
7785
7786 static void bnx2x_reset_port(struct bnx2x *bp)
7787 {
7788         int port = BP_PORT(bp);
7789         u32 val;
7790
7791         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7792
7793         /* Do not rcv packets to BRB */
7794         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7795         /* Do not direct rcv packets that are not for MCP to the BRB */
7796         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7797                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7798
7799         /* Configure AEU */
7800         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7801
7802         msleep(100);
7803         /* Check for BRB port occupancy */
7804         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7805         if (val)
7806                 DP(NETIF_MSG_IFDOWN,
7807                    "BRB1 is not empty  %d blocks are occupied\n", val);
7808
7809         /* TODO: Close Doorbell port? */
7810 }
7811
7812 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7813 {
7814         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7815            BP_FUNC(bp), reset_code);
7816
7817         switch (reset_code) {
7818         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7819                 bnx2x_reset_port(bp);
7820                 bnx2x_reset_func(bp);
7821                 bnx2x_reset_common(bp);
7822                 break;
7823
7824         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7825                 bnx2x_reset_port(bp);
7826                 bnx2x_reset_func(bp);
7827                 break;
7828
7829         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7830                 bnx2x_reset_func(bp);
7831                 break;
7832
7833         default:
7834                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7835                 break;
7836         }
7837 }
7838
7839 /* must be called with rtnl_lock */
7840 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7841 {
7842         int port = BP_PORT(bp);
7843         u32 reset_code = 0;
7844         int i, cnt, rc;
7845
7846 #ifdef BCM_CNIC
7847         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7848 #endif
7849         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7850
7851         /* Set "drop all" */
7852         bp->rx_mode = BNX2X_RX_MODE_NONE;
7853         bnx2x_set_storm_rx_mode(bp);
7854
7855         /* Disable HW interrupts, NAPI and Tx */
7856         bnx2x_netif_stop(bp, 1);
7857
7858         del_timer_sync(&bp->timer);
7859         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7860                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7861         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7862
7863         /* Release IRQs */
7864         bnx2x_free_irq(bp, false);
7865
7866         /* Wait until tx fastpath tasks complete */
7867         for_each_queue(bp, i) {
7868                 struct bnx2x_fastpath *fp = &bp->fp[i];
7869
7870                 cnt = 1000;
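                /* poll for up to ~1 second per queue for pending Tx
                 * completions before giving up */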
7871                 while (bnx2x_has_tx_work_unload(fp)) {
7872
7873                         bnx2x_tx_int(fp);
7874                         if (!cnt) {
7875                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7876                                           i);
7877 #ifdef BNX2X_STOP_ON_ERROR
7878                                 bnx2x_panic();
7879                                 return -EBUSY;
7880 #else
7881                                 break;
7882 #endif
7883                         }
7884                         cnt--;
7885                         msleep(1);
7886                 }
7887         }
7888         /* Give HW time to discard old tx messages */
7889         msleep(1);
7890
7891         if (CHIP_IS_E1(bp)) {
7892                 struct mac_configuration_cmd *config =
7893                                                 bnx2x_sp(bp, mcast_config);
7894
7895                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7896
7897                 for (i = 0; i < config->hdr.length; i++)
7898                         CAM_INVALIDATE(config->config_table[i]);
7899
7900                 config->hdr.length = i;
7901                 if (CHIP_REV_IS_SLOW(bp))
7902                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7903                 else
7904                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7905                 config->hdr.client_id = bp->fp->cl_id;
7906                 config->hdr.reserved1 = 0;
7907
7908                 bp->set_mac_pending++;
7909                 smp_wmb();
7910
7911                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7912                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7913                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7914
7915         } else { /* E1H */
7916                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7917
7918                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7919
7920                 for (i = 0; i < MC_HASH_SIZE; i++)
7921                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7922
7923                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7924         }
7925 #ifdef BCM_CNIC
7926         /* Clear iSCSI L2 MAC */
7927         mutex_lock(&bp->cnic_mutex);
7928         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7929                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7930                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7931         }
7932         mutex_unlock(&bp->cnic_mutex);
7933 #endif
7934
7935         if (unload_mode == UNLOAD_NORMAL)
7936                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7937
7938         else if (bp->flags & NO_WOL_FLAG)
7939                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7940
7941         else if (bp->wol) {
7942                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7943                 u8 *mac_addr = bp->dev->dev_addr;
7944                 u32 val;
7945                 /* The mac address is written to entries 1-4 to
7946                    preserve entry 0 which is used by the PMF */
7947                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7948
7949                 val = (mac_addr[0] << 8) | mac_addr[1];
7950                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7951
7952                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7953                       (mac_addr[4] << 8) | mac_addr[5];
7954                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7955
7956                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7957
7958         } else
7959                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7960
7961         /* Close the multi and leading connections;
7962            ramrod completions are collected synchronously */
7963         for_each_nondefault_queue(bp, i)
7964                 if (bnx2x_stop_multi(bp, i))
7965                         goto unload_error;
7966
7967         rc = bnx2x_stop_leading(bp);
7968         if (rc) {
7969                 BNX2X_ERR("Stop leading failed!\n");
7970 #ifdef BNX2X_STOP_ON_ERROR
7971                 return -EBUSY;
7972 #else
7973                 goto unload_error;
7974 #endif
7975         }
7976
7977 unload_error:
7978         if (!BP_NOMCP(bp))
7979                 reset_code = bnx2x_fw_command(bp, reset_code);
7980         else {
7981                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7982                    load_count[0], load_count[1], load_count[2]);
7983                 load_count[0]--;
7984                 load_count[1 + port]--;
7985                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7986                    load_count[0], load_count[1], load_count[2]);
7987                 if (load_count[0] == 0)
7988                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7989                 else if (load_count[1 + port] == 0)
7990                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7991                 else
7992                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7993         }
7994
7995         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7996             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7997                 bnx2x__link_reset(bp);
7998
7999         /* Reset the chip */
8000         bnx2x_reset_chip(bp, reset_code);
8001
8002         /* Report UNLOAD_DONE to MCP */
8003         if (!BP_NOMCP(bp))
8004                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8005
8006         bp->port.pmf = 0;
8007
8008         /* Free SKBs, SGEs, TPA pool and driver internals */
8009         bnx2x_free_skbs(bp);
8010         for_each_queue(bp, i)
8011                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8012         for_each_queue(bp, i)
8013                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8014         bnx2x_free_mem(bp);
8015
8016         bp->state = BNX2X_STATE_CLOSED;
8017
8018         netif_carrier_off(bp->dev);
8019
8020         return 0;
8021 }
8022
8023 static void bnx2x_reset_task(struct work_struct *work)
8024 {
8025         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8026
8027 #ifdef BNX2X_STOP_ON_ERROR
8028         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8029                   " so reset not done to allow debug dump,\n"
8030                   " you will need to reboot when done\n");
8031         return;
8032 #endif
8033
8034         rtnl_lock();
8035
8036         if (!netif_running(bp->dev))
8037                 goto reset_task_exit;
8038
8039         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8040         bnx2x_nic_load(bp, LOAD_NORMAL);
8041
8042 reset_task_exit:
8043         rtnl_unlock();
8044 }
8045
8046 /* end of nic load/unload */
8047
8048 /* ethtool_ops */
8049
8050 /*
8051  * Init service functions
8052  */
8053
8054 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8055 {
8056         switch (func) {
8057         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8058         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8059         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8060         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8061         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8062         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8063         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8064         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8065         default:
8066                 BNX2X_ERR("Unsupported function index: %d\n", func);
8067                 return (u32)(-1);
8068         }
8069 }
8070
8071 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8072 {
8073         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8074
8075         /* Flush all outstanding writes */
8076         mmiowb();
8077
8078         /* Pretend to be function 0 */
8079         REG_WR(bp, reg, 0);
8080         /* Flush the GRC transaction (in the chip) */
8081         new_val = REG_RD(bp, reg);
8082         if (new_val != 0) {
8083                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8084                           new_val);
8085                 BUG();
8086         }
8087
8088         /* From now we are in the "like-E1" mode */
8089         bnx2x_int_disable(bp);
8090
8091         /* Flush all outstanding writes */
8092         mmiowb();
8093
8094         /* Restore the original function settings */
8095         REG_WR(bp, reg, orig_func);
8096         new_val = REG_RD(bp, reg);
8097         if (new_val != orig_func) {
8098                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8099                           orig_func, new_val);
8100                 BUG();
8101         }
8102 }
8103
8104 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8105 {
8106         if (CHIP_IS_E1H(bp))
8107                 bnx2x_undi_int_disable_e1h(bp, func);
8108         else
8109                 bnx2x_int_disable(bp);
8110 }
8111
8112 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8113 {
8114         u32 val;
8115
8116         /* Check if there is any driver already loaded */
8117         val = REG_RD(bp, MISC_REG_UNPREPARED);
8118         if (val == 0x1) {
8119                 /* Check if it is the UNDI driver
8120                  * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
8121                  */
8122                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8123                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8124                 if (val == 0x7) {
8125                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8126                         /* save our func */
8127                         int func = BP_FUNC(bp);
8128                         u32 swap_en;
8129                         u32 swap_val;
8130
8131                         /* clear the UNDI indication */
8132                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8133
8134                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8135
8136                         /* try unload UNDI on port 0 */
8137                         bp->func = 0;
8138                         bp->fw_seq =
8139                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8140                                 DRV_MSG_SEQ_NUMBER_MASK);
8141                         reset_code = bnx2x_fw_command(bp, reset_code);
8142
8143                         /* if UNDI is loaded on the other port */
8144                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8145
8146                                 /* send "DONE" for previous unload */
8147                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8148
8149                                 /* unload UNDI on port 1 */
8150                                 bp->func = 1;
8151                                 bp->fw_seq =
8152                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8153                                         DRV_MSG_SEQ_NUMBER_MASK);
8154                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8155
8156                                 bnx2x_fw_command(bp, reset_code);
8157                         }
8158
8159                         /* now it's safe to release the lock */
8160                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8161
8162                         bnx2x_undi_int_disable(bp, func);
8163
8164                         /* close input traffic and wait for it */
8165                         /* Do not rcv packets to BRB */
8166                         REG_WR(bp,
8167                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8168                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8169                         /* Do not direct rcv packets that are not for MCP to
8170                          * the BRB */
8171                         REG_WR(bp,
8172                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8173                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8174                         /* clear AEU */
8175                         REG_WR(bp,
8176                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8177                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8178                         msleep(10);
8179
8180                         /* save NIG port swap info */
8181                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8182                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8183                         /* reset device */
8184                         REG_WR(bp,
8185                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8186                                0xd3ffffff);
8187                         REG_WR(bp,
8188                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8189                                0x1403);
8190                         /* take the NIG out of reset and restore swap values */
8191                         REG_WR(bp,
8192                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8193                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8194                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8195                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8196
8197                         /* send unload done to the MCP */
8198                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8199
8200                         /* restore our func and fw_seq */
8201                         bp->func = func;
8202                         bp->fw_seq =
8203                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8204                                 DRV_MSG_SEQ_NUMBER_MASK);
8205
8206                 } else
8207                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8208         }
8209 }
8210
8211 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8212 {
8213         u32 val, val2, val3, val4, id;
8214         u16 pmc;
8215
8216         /* Get the chip revision id and number. */
8217         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8218         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8219         id = ((val & 0xffff) << 16);
8220         val = REG_RD(bp, MISC_REG_CHIP_REV);
8221         id |= ((val & 0xf) << 12);
8222         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8223         id |= ((val & 0xff) << 4);
8224         val = REG_RD(bp, MISC_REG_BOND_ID);
8225         id |= (val & 0xf);
8226         bp->common.chip_id = id;
8227         bp->link_params.chip_id = bp->common.chip_id;
8228         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8229
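             /* single-port heuristic: an odd chip_id, or what appear to be
              * port-strap bits sampled from offset 0x2874 (exact meaning of
              * that register is not documented here)
              */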
8230         val = (REG_RD(bp, 0x2874) & 0x55);
8231         if ((bp->common.chip_id & 0x1) ||
8232             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8233                 bp->flags |= ONE_PORT_FLAG;
8234                 BNX2X_DEV_INFO("single port device\n");
8235         }
8236
8237         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8238         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8239                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8240         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8241                        bp->common.flash_size, bp->common.flash_size);
8242
8243         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8244         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8245         bp->link_params.shmem_base = bp->common.shmem_base;
8246         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8247                        bp->common.shmem_base, bp->common.shmem2_base);
8248
8249         if (!bp->common.shmem_base ||
8250             (bp->common.shmem_base < 0xA0000) ||
8251             (bp->common.shmem_base >= 0xC0000)) {
8252                 BNX2X_DEV_INFO("MCP not active\n");
8253                 bp->flags |= NO_MCP_FLAG;
8254                 return;
8255         }
8256
8257         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8258         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8259                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8260                 BNX2X_ERR("BAD MCP validity signature\n");
8261
8262         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8263         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8264
8265         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8266                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8267                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8268
8269         bp->link_params.feature_config_flags = 0;
8270         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8271         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8272                 bp->link_params.feature_config_flags |=
8273                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8274         else
8275                 bp->link_params.feature_config_flags &=
8276                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8277
8278         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8279         bp->common.bc_ver = val;
8280         BNX2X_DEV_INFO("bc_ver %X\n", val);
8281         if (val < BNX2X_BC_VER) {
8282                 /* for now only warn;
8283                  * later we might need to enforce this */
8284                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8285                           " please upgrade BC\n", BNX2X_BC_VER, val);
8286         }
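             /* optic module verification is only offered by bootcode versions
              * REQ_BC_VER_4_VRFY_OPT_MDL and newer
              */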
8287         bp->link_params.feature_config_flags |=
8288                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8289                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8290
8291         if (BP_E1HVN(bp) == 0) {
8292                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8293                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8294         } else {
8295                 /* no WOL capability for E1HVN != 0 */
8296                 bp->flags |= NO_WOL_FLAG;
8297         }
8298         BNX2X_DEV_INFO("%sWoL capable\n",
8299                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8300
8301         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8302         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8303         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8304         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8305
8306         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8307                val, val2, val3, val4);
8308 }
8309
8310 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8311                                                     u32 switch_cfg)
8312 {
8313         int port = BP_PORT(bp);
8314         u32 ext_phy_type;
8315
8316         switch (switch_cfg) {
8317         case SWITCH_CFG_1G:
8318                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8319
8320                 ext_phy_type =
8321                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8322                 switch (ext_phy_type) {
8323                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8324                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8325                                        ext_phy_type);
8326
8327                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8328                                                SUPPORTED_10baseT_Full |
8329                                                SUPPORTED_100baseT_Half |
8330                                                SUPPORTED_100baseT_Full |
8331                                                SUPPORTED_1000baseT_Full |
8332                                                SUPPORTED_2500baseX_Full |
8333                                                SUPPORTED_TP |
8334                                                SUPPORTED_FIBRE |
8335                                                SUPPORTED_Autoneg |
8336                                                SUPPORTED_Pause |
8337                                                SUPPORTED_Asym_Pause);
8338                         break;
8339
8340                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8341                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8342                                        ext_phy_type);
8343
8344                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8345                                                SUPPORTED_10baseT_Full |
8346                                                SUPPORTED_100baseT_Half |
8347                                                SUPPORTED_100baseT_Full |
8348                                                SUPPORTED_1000baseT_Full |
8349                                                SUPPORTED_TP |
8350                                                SUPPORTED_FIBRE |
8351                                                SUPPORTED_Autoneg |
8352                                                SUPPORTED_Pause |
8353                                                SUPPORTED_Asym_Pause);
8354                         break;
8355
8356                 default:
8357                         BNX2X_ERR("NVRAM config error. "
8358                                   "BAD SerDes ext_phy_config 0x%x\n",
8359                                   bp->link_params.ext_phy_config);
8360                         return;
8361                 }
8362
8363                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8364                                            port*0x10);
8365                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8366                 break;
8367
8368         case SWITCH_CFG_10G:
8369                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8370
8371                 ext_phy_type =
8372                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8373                 switch (ext_phy_type) {
8374                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8375                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8376                                        ext_phy_type);
8377
8378                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8379                                                SUPPORTED_10baseT_Full |
8380                                                SUPPORTED_100baseT_Half |
8381                                                SUPPORTED_100baseT_Full |
8382                                                SUPPORTED_1000baseT_Full |
8383                                                SUPPORTED_2500baseX_Full |
8384                                                SUPPORTED_10000baseT_Full |
8385                                                SUPPORTED_TP |
8386                                                SUPPORTED_FIBRE |
8387                                                SUPPORTED_Autoneg |
8388                                                SUPPORTED_Pause |
8389                                                SUPPORTED_Asym_Pause);
8390                         break;
8391
8392                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8393                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8394                                        ext_phy_type);
8395
8396                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8397                                                SUPPORTED_1000baseT_Full |
8398                                                SUPPORTED_FIBRE |
8399                                                SUPPORTED_Autoneg |
8400                                                SUPPORTED_Pause |
8401                                                SUPPORTED_Asym_Pause);
8402                         break;
8403
8404                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8405                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8406                                        ext_phy_type);
8407
8408                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8409                                                SUPPORTED_2500baseX_Full |
8410                                                SUPPORTED_1000baseT_Full |
8411                                                SUPPORTED_FIBRE |
8412                                                SUPPORTED_Autoneg |
8413                                                SUPPORTED_Pause |
8414                                                SUPPORTED_Asym_Pause);
8415                         break;
8416
8417                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8418                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8419                                        ext_phy_type);
8420
8421                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8422                                                SUPPORTED_FIBRE |
8423                                                SUPPORTED_Pause |
8424                                                SUPPORTED_Asym_Pause);
8425                         break;
8426
8427                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8428                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8429                                        ext_phy_type);
8430
8431                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8432                                                SUPPORTED_1000baseT_Full |
8433                                                SUPPORTED_FIBRE |
8434                                                SUPPORTED_Pause |
8435                                                SUPPORTED_Asym_Pause);
8436                         break;
8437
8438                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8439                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8440                                        ext_phy_type);
8441
8442                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8443                                                SUPPORTED_1000baseT_Full |
8444                                                SUPPORTED_Autoneg |
8445                                                SUPPORTED_FIBRE |
8446                                                SUPPORTED_Pause |
8447                                                SUPPORTED_Asym_Pause);
8448                         break;
8449
8450                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8451                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8452                                        ext_phy_type);
8453
8454                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8455                                                SUPPORTED_1000baseT_Full |
8456                                                SUPPORTED_Autoneg |
8457                                                SUPPORTED_FIBRE |
8458                                                SUPPORTED_Pause |
8459                                                SUPPORTED_Asym_Pause);
8460                         break;
8461
8462                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8463                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8464                                        ext_phy_type);
8465
8466                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8467                                                SUPPORTED_TP |
8468                                                SUPPORTED_Autoneg |
8469                                                SUPPORTED_Pause |
8470                                                SUPPORTED_Asym_Pause);
8471                         break;
8472
8473                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8474                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8475                                        ext_phy_type);
8476
8477                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8478                                                SUPPORTED_10baseT_Full |
8479                                                SUPPORTED_100baseT_Half |
8480                                                SUPPORTED_100baseT_Full |
8481                                                SUPPORTED_1000baseT_Full |
8482                                                SUPPORTED_10000baseT_Full |
8483                                                SUPPORTED_TP |
8484                                                SUPPORTED_Autoneg |
8485                                                SUPPORTED_Pause |
8486                                                SUPPORTED_Asym_Pause);
8487                         break;
8488
8489                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8490                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8491                                   bp->link_params.ext_phy_config);
8492                         break;
8493
8494                 default:
8495                         BNX2X_ERR("NVRAM config error. "
8496                                   "BAD XGXS ext_phy_config 0x%x\n",
8497                                   bp->link_params.ext_phy_config);
8498                         return;
8499                 }
8500
8501                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8502                                            port*0x18);
8503                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8504
8505                 break;
8506
8507         default:
8508                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8509                           bp->port.link_config);
8510                 return;
8511         }
8512         bp->link_params.phy_addr = bp->port.phy_addr;
8513
8514         /* mask what we support according to speed_cap_mask */
8515         if (!(bp->link_params.speed_cap_mask &
8516                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8517                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8518
8519         if (!(bp->link_params.speed_cap_mask &
8520                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8521                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8522
8523         if (!(bp->link_params.speed_cap_mask &
8524                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8525                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8526
8527         if (!(bp->link_params.speed_cap_mask &
8528                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8529                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8530
8531         if (!(bp->link_params.speed_cap_mask &
8532                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8533                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8534                                         SUPPORTED_1000baseT_Full);
8535
8536         if (!(bp->link_params.speed_cap_mask &
8537                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8538                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8539
8540         if (!(bp->link_params.speed_cap_mask &
8541                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8542                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8543
8544         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8545 }
8546
8547 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8548 {
8549         bp->link_params.req_duplex = DUPLEX_FULL;
8550
8551         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8552         case PORT_FEATURE_LINK_SPEED_AUTO:
8553                 if (bp->port.supported & SUPPORTED_Autoneg) {
8554                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8555                         bp->port.advertising = bp->port.supported;
8556                 } else {
8557                         u32 ext_phy_type =
8558                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8559
8560                         if ((ext_phy_type ==
8561                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8562                             (ext_phy_type ==
8563                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8564                                 /* force 10G, no AN */
8565                                 bp->link_params.req_line_speed = SPEED_10000;
8566                                 bp->port.advertising =
8567                                                 (ADVERTISED_10000baseT_Full |
8568                                                  ADVERTISED_FIBRE);
8569                                 break;
8570                         }
8571                         BNX2X_ERR("NVRAM config error. "
8572                                   "Invalid link_config 0x%x"
8573                                   "  Autoneg not supported\n",
8574                                   bp->port.link_config);
8575                         return;
8576                 }
8577                 break;
8578
8579         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8580                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8581                         bp->link_params.req_line_speed = SPEED_10;
8582                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8583                                                 ADVERTISED_TP);
8584                 } else {
8585                         BNX2X_ERR("NVRAM config error. "
8586                                   "Invalid link_config 0x%x"
8587                                   "  speed_cap_mask 0x%x\n",
8588                                   bp->port.link_config,
8589                                   bp->link_params.speed_cap_mask);
8590                         return;
8591                 }
8592                 break;
8593
8594         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8595                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8596                         bp->link_params.req_line_speed = SPEED_10;
8597                         bp->link_params.req_duplex = DUPLEX_HALF;
8598                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8599                                                 ADVERTISED_TP);
8600                 } else {
8601                         BNX2X_ERR("NVRAM config error. "
8602                                   "Invalid link_config 0x%x"
8603                                   "  speed_cap_mask 0x%x\n",
8604                                   bp->port.link_config,
8605                                   bp->link_params.speed_cap_mask);
8606                         return;
8607                 }
8608                 break;
8609
8610         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8611                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8612                         bp->link_params.req_line_speed = SPEED_100;
8613                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8614                                                 ADVERTISED_TP);
8615                 } else {
8616                         BNX2X_ERR("NVRAM config error. "
8617                                   "Invalid link_config 0x%x"
8618                                   "  speed_cap_mask 0x%x\n",
8619                                   bp->port.link_config,
8620                                   bp->link_params.speed_cap_mask);
8621                         return;
8622                 }
8623                 break;
8624
8625         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8626                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8627                         bp->link_params.req_line_speed = SPEED_100;
8628                         bp->link_params.req_duplex = DUPLEX_HALF;
8629                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8630                                                 ADVERTISED_TP);
8631                 } else {
8632                         BNX2X_ERR("NVRAM config error. "
8633                                   "Invalid link_config 0x%x"
8634                                   "  speed_cap_mask 0x%x\n",
8635                                   bp->port.link_config,
8636                                   bp->link_params.speed_cap_mask);
8637                         return;
8638                 }
8639                 break;
8640
8641         case PORT_FEATURE_LINK_SPEED_1G:
8642                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8643                         bp->link_params.req_line_speed = SPEED_1000;
8644                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8645                                                 ADVERTISED_TP);
8646                 } else {
8647                         BNX2X_ERR("NVRAM config error. "
8648                                   "Invalid link_config 0x%x"
8649                                   "  speed_cap_mask 0x%x\n",
8650                                   bp->port.link_config,
8651                                   bp->link_params.speed_cap_mask);
8652                         return;
8653                 }
8654                 break;
8655
8656         case PORT_FEATURE_LINK_SPEED_2_5G:
8657                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8658                         bp->link_params.req_line_speed = SPEED_2500;
8659                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8660                                                 ADVERTISED_TP);
8661                 } else {
8662                         BNX2X_ERR("NVRAM config error. "
8663                                   "Invalid link_config 0x%x"
8664                                   "  speed_cap_mask 0x%x\n",
8665                                   bp->port.link_config,
8666                                   bp->link_params.speed_cap_mask);
8667                         return;
8668                 }
8669                 break;
8670
8671         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8672         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8673         case PORT_FEATURE_LINK_SPEED_10G_KR:
8674                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8675                         bp->link_params.req_line_speed = SPEED_10000;
8676                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8677                                                 ADVERTISED_FIBRE);
8678                 } else {
8679                         BNX2X_ERR("NVRAM config error. "
8680                                   "Invalid link_config 0x%x"
8681                                   "  speed_cap_mask 0x%x\n",
8682                                   bp->port.link_config,
8683                                   bp->link_params.speed_cap_mask);
8684                         return;
8685                 }
8686                 break;
8687
8688         default:
8689                 BNX2X_ERR("NVRAM config error. "
8690                           "BAD link speed link_config 0x%x\n",
8691                           bp->port.link_config);
8692                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8693                 bp->port.advertising = bp->port.supported;
8694                 break;
8695         }
8696
8697         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8698                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8699         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8700             !(bp->port.supported & SUPPORTED_Autoneg))
8701                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8702
8703         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8704                        "  advertising 0x%x\n",
8705                        bp->link_params.req_line_speed,
8706                        bp->link_params.req_duplex,
8707                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8708 }
8709
8710 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8711 {
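             /* SHMEM stores the MAC as a 16-bit upper and a 32-bit lower part;
              * converting both to big-endian lays the six bytes out in network
              * order in mac_buf
              */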
8712         mac_hi = cpu_to_be16(mac_hi);
8713         mac_lo = cpu_to_be32(mac_lo);
8714         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8715         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8716 }
8717
8718 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8719 {
8720         int port = BP_PORT(bp);
8721         u32 val, val2;
8722         u32 config;
8723         u16 i;
8724         u32 ext_phy_type;
8725
8726         bp->link_params.bp = bp;
8727         bp->link_params.port = port;
8728
8729         bp->link_params.lane_config =
8730                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8731         bp->link_params.ext_phy_config =
8732                 SHMEM_RD(bp,
8733                          dev_info.port_hw_config[port].external_phy_config);
8734         /* BCM8727_NOC => BCM8727, no over-current */
8735         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8736             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8737                 bp->link_params.ext_phy_config &=
8738                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8739                 bp->link_params.ext_phy_config |=
8740                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8741                 bp->link_params.feature_config_flags |=
8742                         FEATURE_CONFIG_BCM8727_NOC;
8743         }
8744
8745         bp->link_params.speed_cap_mask =
8746                 SHMEM_RD(bp,
8747                          dev_info.port_hw_config[port].speed_capability_mask);
8748
8749         bp->port.link_config =
8750                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8751
8752         /* Get the 4 lanes xgxs config rx and tx */
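             /* each 32-bit SHMEM word packs two 16-bit lane settings:
              * the high half is lane 2i, the low half is lane 2i+1
              */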
8753         for (i = 0; i < 2; i++) {
8754                 val = SHMEM_RD(bp,
8755                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8756                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8757                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8758
8759                 val = SHMEM_RD(bp,
8760                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8761                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8762                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8763         }
8764
8765         /* If the device is capable of WoL, set the default state according
8766          * to the HW
8767          */
8768         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8769         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8770                    (config & PORT_FEATURE_WOL_ENABLED));
8771
8772         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8773                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8774                        bp->link_params.lane_config,
8775                        bp->link_params.ext_phy_config,
8776                        bp->link_params.speed_cap_mask, bp->port.link_config);
8777
8778         bp->link_params.switch_cfg |= (bp->port.link_config &
8779                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8780         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8781
8782         bnx2x_link_settings_requested(bp);
8783
8784         /*
8785          * If connected directly, work with the internal PHY; otherwise
8786          * work with the external PHY
8787          */
8788         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8789         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8790                 bp->mdio.prtad = bp->link_params.phy_addr;
8791
8792         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8793                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8794                 bp->mdio.prtad =
8795                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8796
8797         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8798         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8799         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8800         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8801         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8802
8803 #ifdef BCM_CNIC
8804         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8805         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8806         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8807 #endif
8808 }
8809
8810 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8811 {
8812         int func = BP_FUNC(bp);
8813         u32 val, val2;
8814         int rc = 0;
8815
8816         bnx2x_get_common_hwinfo(bp);
8817
8818         bp->e1hov = 0;
8819         bp->e1hmf = 0;
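             /* on E1H, multi-function mode is signalled by a valid outer-VLAN
              * (E1HOV) tag in function 0's shared configuration
              */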
8820         if (CHIP_IS_E1H(bp)) {
8821                 bp->mf_config =
8822                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8823
8824                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8825                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8826                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8827                         bp->e1hmf = 1;
8828                 BNX2X_DEV_INFO("%s function mode\n",
8829                                IS_E1HMF(bp) ? "multi" : "single");
8830
8831                 if (IS_E1HMF(bp)) {
8832                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8833                                                                 e1hov_tag) &
8834                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8835                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8836                                 bp->e1hov = val;
8837                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8838                                                "(0x%04x)\n",
8839                                                func, bp->e1hov, bp->e1hov);
8840                         } else {
8841                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8842                                           "  aborting\n", func);
8843                                 rc = -EPERM;
8844                         }
8845                 } else {
8846                         if (BP_E1HVN(bp)) {
8847                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8848                                           "  aborting\n", BP_E1HVN(bp));
8849                                 rc = -EPERM;
8850                         }
8851                 }
8852         }
8853
8854         if (!BP_NOMCP(bp)) {
8855                 bnx2x_get_port_hwinfo(bp);
8856
8857                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8858                               DRV_MSG_SEQ_NUMBER_MASK);
8859                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8860         }
8861
8862         if (IS_E1HMF(bp)) {
8863                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8864                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8865                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8866                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8867                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8868                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8869                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8870                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8871                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8872                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8873                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8874                                ETH_ALEN);
8875                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8876                                ETH_ALEN);
8877                 }
8878
8879                 return rc;
8880         }
8881
8882         if (BP_NOMCP(bp)) {
8883                 /* only supposed to happen on emulation/FPGA */
8884                 BNX2X_ERR("warning: random MAC workaround active\n");
8885                 random_ether_addr(bp->dev->dev_addr);
8886                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8887         }
8888
8889         return rc;
8890 }
8891
8892 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8893 {
8894         int func = BP_FUNC(bp);
8895         int timer_interval;
8896         int rc;
8897
8898         /* Disable interrupt handling until HW is initialized */
8899         atomic_set(&bp->intr_sem, 1);
8900         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8901
8902         mutex_init(&bp->port.phy_mutex);
8903         mutex_init(&bp->fw_mb_mutex);
8904 #ifdef BCM_CNIC
8905         mutex_init(&bp->cnic_mutex);
8906 #endif
8907
8908         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8909         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8910
8911         rc = bnx2x_get_hwinfo(bp);
8912
8913         /* need to reset the chip if UNDI was active */
8914         if (!BP_NOMCP(bp))
8915                 bnx2x_undi_unload(bp);
8916
8917         if (CHIP_REV_IS_FPGA(bp))
8918                 printk(KERN_ERR PFX "FPGA detected\n");
8919
8920         if (BP_NOMCP(bp) && (func == 0))
8921                 printk(KERN_ERR PFX
8922                        "MCP disabled, must load devices in order!\n");
8923
8924         /* Set multi queue mode */
8925         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8926             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8927                 printk(KERN_ERR PFX
8928                       "Multi queue disabled since the requested int_mode is not MSI-X\n");
8929                 multi_mode = ETH_RSS_MODE_DISABLED;
8930         }
8931         bp->multi_mode = multi_mode;
8932
8933
8934         /* Set TPA flags */
8935         if (disable_tpa) {
8936                 bp->flags &= ~TPA_ENABLE_FLAG;
8937                 bp->dev->features &= ~NETIF_F_LRO;
8938         } else {
8939                 bp->flags |= TPA_ENABLE_FLAG;
8940                 bp->dev->features |= NETIF_F_LRO;
8941         }
8942
8943         if (CHIP_IS_E1(bp))
8944                 bp->dropless_fc = 0;
8945         else
8946                 bp->dropless_fc = dropless_fc;
8947
8948         bp->mrrs = mrrs;
8949
8950         bp->tx_ring_size = MAX_TX_AVAIL;
8951         bp->rx_ring_size = MAX_RX_AVAIL;
8952
8953         bp->rx_csum = 1;
8954
8955         /* make sure that the numbers are in the right granularity */
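             /* 4 * BNX2X_BTR is the coalescing-timer granularity; the 50 and
              * 25 defaults (presumably microseconds) are rounded down to it
              */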
8956         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8957         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8958
8959         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8960         bp->current_interval = (poll ? poll : timer_interval);
8961
8962         init_timer(&bp->timer);
8963         bp->timer.expires = jiffies + bp->current_interval;
8964         bp->timer.data = (unsigned long) bp;
8965         bp->timer.function = bnx2x_timer;
8966
8967         return rc;
8968 }
8969
8970 /*
8971  * ethtool service functions
8972  */
8973
8974 /* All ethtool functions called with rtnl_lock */
8975
8976 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8977 {
8978         struct bnx2x *bp = netdev_priv(dev);
8979
8980         cmd->supported = bp->port.supported;
8981         cmd->advertising = bp->port.advertising;
8982
8983         if ((bp->state == BNX2X_STATE_OPEN) &&
8984             !(bp->flags & MF_FUNC_DIS) &&
8985             (bp->link_vars.link_up)) {
8986                 cmd->speed = bp->link_vars.line_speed;
8987                 cmd->duplex = bp->link_vars.duplex;
8988                 if (IS_E1HMF(bp)) {
8989                         u16 vn_max_rate;
8990
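                             /* in MF mode each function's rate is capped;
                              * MAX_BW is stored in units of 100 Mbps
                              */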
8991                         vn_max_rate =
8992                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8993                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8994                         if (vn_max_rate < cmd->speed)
8995                                 cmd->speed = vn_max_rate;
8996                 }
8997         } else {
8998                 cmd->speed = -1;
8999                 cmd->duplex = -1;
9000         }
9001
9002         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9003                 u32 ext_phy_type =
9004                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9005
9006                 switch (ext_phy_type) {
9007                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9008                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9009                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9010                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9011                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9013                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9014                         cmd->port = PORT_FIBRE;
9015                         break;
9016
9017                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9018                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9019                         cmd->port = PORT_TP;
9020                         break;
9021
9022                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9023                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9024                                   bp->link_params.ext_phy_config);
9025                         break;
9026
9027                 default:
9028                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9029                            bp->link_params.ext_phy_config);
9030                         break;
9031                 }
9032         } else
9033                 cmd->port = PORT_TP;
9034
9035         cmd->phy_address = bp->mdio.prtad;
9036         cmd->transceiver = XCVR_INTERNAL;
9037
9038         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9039                 cmd->autoneg = AUTONEG_ENABLE;
9040         else
9041                 cmd->autoneg = AUTONEG_DISABLE;
9042
9043         cmd->maxtxpkt = 0;
9044         cmd->maxrxpkt = 0;
9045
9046         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9047            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9048            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9049            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9050            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9051            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9052            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9053
9054         return 0;
9055 }
9056
9057 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9058 {
9059         struct bnx2x *bp = netdev_priv(dev);
9060         u32 advertising;
9061
9062         if (IS_E1HMF(bp))
9063                 return 0;
9064
9065         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9066            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9067            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9068            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9069            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9070            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9071            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9072
9073         if (cmd->autoneg == AUTONEG_ENABLE) {
9074                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9075                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9076                         return -EINVAL;
9077                 }
9078
9079                 /* advertise the requested speed and duplex if supported */
9080                 cmd->advertising &= bp->port.supported;
9081
9082                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9083                 bp->link_params.req_duplex = DUPLEX_FULL;
9084                 bp->port.advertising |= (ADVERTISED_Autoneg |
9085                                          cmd->advertising);
9086
9087         } else { /* forced speed */
9088                 /* advertise the requested speed and duplex if supported */
9089                 switch (cmd->speed) {
9090                 case SPEED_10:
9091                         if (cmd->duplex == DUPLEX_FULL) {
9092                                 if (!(bp->port.supported &
9093                                       SUPPORTED_10baseT_Full)) {
9094                                         DP(NETIF_MSG_LINK,
9095                                            "10M full not supported\n");
9096                                         return -EINVAL;
9097                                 }
9098
9099                                 advertising = (ADVERTISED_10baseT_Full |
9100                                                ADVERTISED_TP);
9101                         } else {
9102                                 if (!(bp->port.supported &
9103                                       SUPPORTED_10baseT_Half)) {
9104                                         DP(NETIF_MSG_LINK,
9105                                            "10M half not supported\n");
9106                                         return -EINVAL;
9107                                 }
9108
9109                                 advertising = (ADVERTISED_10baseT_Half |
9110                                                ADVERTISED_TP);
9111                         }
9112                         break;
9113
9114                 case SPEED_100:
9115                         if (cmd->duplex == DUPLEX_FULL) {
9116                                 if (!(bp->port.supported &
9117                                                 SUPPORTED_100baseT_Full)) {
9118                                         DP(NETIF_MSG_LINK,
9119                                            "100M full not supported\n");
9120                                         return -EINVAL;
9121                                 }
9122
9123                                 advertising = (ADVERTISED_100baseT_Full |
9124                                                ADVERTISED_TP);
9125                         } else {
9126                                 if (!(bp->port.supported &
9127                                                 SUPPORTED_100baseT_Half)) {
9128                                         DP(NETIF_MSG_LINK,
9129                                            "100M half not supported\n");
9130                                         return -EINVAL;
9131                                 }
9132
9133                                 advertising = (ADVERTISED_100baseT_Half |
9134                                                ADVERTISED_TP);
9135                         }
9136                         break;
9137
9138                 case SPEED_1000:
9139                         if (cmd->duplex != DUPLEX_FULL) {
9140                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9141                                 return -EINVAL;
9142                         }
9143
9144                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9145                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9146                                 return -EINVAL;
9147                         }
9148
9149                         advertising = (ADVERTISED_1000baseT_Full |
9150                                        ADVERTISED_TP);
9151                         break;
9152
9153                 case SPEED_2500:
9154                         if (cmd->duplex != DUPLEX_FULL) {
9155                                 DP(NETIF_MSG_LINK,
9156                                    "2.5G half not supported\n");
9157                                 return -EINVAL;
9158                         }
9159
9160                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9161                                 DP(NETIF_MSG_LINK,
9162                                    "2.5G full not supported\n");
9163                                 return -EINVAL;
9164                         }
9165
9166                         advertising = (ADVERTISED_2500baseX_Full |
9167                                        ADVERTISED_TP);
9168                         break;
9169
9170                 case SPEED_10000:
9171                         if (cmd->duplex != DUPLEX_FULL) {
9172                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9173                                 return -EINVAL;
9174                         }
9175
9176                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9177                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9178                                 return -EINVAL;
9179                         }
9180
9181                         advertising = (ADVERTISED_10000baseT_Full |
9182                                        ADVERTISED_FIBRE);
9183                         break;
9184
9185                 default:
9186                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9187                         return -EINVAL;
9188                 }
9189
9190                 bp->link_params.req_line_speed = cmd->speed;
9191                 bp->link_params.req_duplex = cmd->duplex;
9192                 bp->port.advertising = advertising;
9193         }
9194
9195         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9196            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9197            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9198            bp->port.advertising);
9199
9200         if (netif_running(dev)) {
9201                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9202                 bnx2x_link_set(bp);
9203         }
9204
9205         return 0;
9206 }
9207
9208 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9209 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9210
9211 static int bnx2x_get_regs_len(struct net_device *dev)
9212 {
9213         struct bnx2x *bp = netdev_priv(dev);
9214         int regdump_len = 0;
9215         int i;
9216
9217         if (CHIP_IS_E1(bp)) {
9218                 for (i = 0; i < REGS_COUNT; i++)
9219                         if (IS_E1_ONLINE(reg_addrs[i].info))
9220                                 regdump_len += reg_addrs[i].size;
9221
9222                 for (i = 0; i < WREGS_COUNT_E1; i++)
9223                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9224                                 regdump_len += wreg_addrs_e1[i].size *
9225                                         (1 + wreg_addrs_e1[i].read_regs_count);
9226
9227         } else { /* E1H */
9228                 for (i = 0; i < REGS_COUNT; i++)
9229                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9230                                 regdump_len += reg_addrs[i].size;
9231
9232                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9233                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9234                                 regdump_len += wreg_addrs_e1h[i].size *
9235                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9236         }
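             /* the sizes counted above are in 32-bit words */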
9237         regdump_len *= 4;
9238         regdump_len += sizeof(struct dump_hdr);
9239
9240         return regdump_len;
9241 }
9242
9243 static void bnx2x_get_regs(struct net_device *dev,
9244                            struct ethtool_regs *regs, void *_p)
9245 {
9246         u32 *p = _p, i, j;
9247         struct bnx2x *bp = netdev_priv(dev);
9248         struct dump_hdr dump_hdr = {0};
9249
9250         regs->version = 0;
9251         memset(p, 0, regs->len);
9252
9253         if (!netif_running(bp->dev))
9254                 return;
9255
9256         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9257         dump_hdr.dump_sign = dump_sign_all;
9258         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9259         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9260         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9261         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9262         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9263
9264         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9265         p += dump_hdr.hdr_size + 1;
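             /* the header occupies hdr_size + 1 dwords; raw register values
              * follow
              */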
9266
9267         if (CHIP_IS_E1(bp)) {
9268                 for (i = 0; i < REGS_COUNT; i++)
9269                         if (IS_E1_ONLINE(reg_addrs[i].info))
9270                                 for (j = 0; j < reg_addrs[i].size; j++)
9271                                         *p++ = REG_RD(bp,
9272                                                       reg_addrs[i].addr + j*4);
9273
9274         } else { /* E1H */
9275                 for (i = 0; i < REGS_COUNT; i++)
9276                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9277                                 for (j = 0; j < reg_addrs[i].size; j++)
9278                                         *p++ = REG_RD(bp,
9279                                                       reg_addrs[i].addr + j*4);
9280         }
9281 }
9282
9283 #define PHY_FW_VER_LEN                  10
9284
9285 static void bnx2x_get_drvinfo(struct net_device *dev,
9286                               struct ethtool_drvinfo *info)
9287 {
9288         struct bnx2x *bp = netdev_priv(dev);
9289         u8 phy_fw_ver[PHY_FW_VER_LEN];
9290
9291         strcpy(info->driver, DRV_MODULE_NAME);
9292         strcpy(info->version, DRV_MODULE_VERSION);
9293
9294         phy_fw_ver[0] = '\0';
9295         if (bp->port.pmf) {
9296                 bnx2x_acquire_phy_lock(bp);
9297                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9298                                              (bp->state != BNX2X_STATE_CLOSED),
9299                                              phy_fw_ver, PHY_FW_VER_LEN);
9300                 bnx2x_release_phy_lock(bp);
9301         }
9302
9303         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9304                  (bp->common.bc_ver & 0xff0000) >> 16,
9305                  (bp->common.bc_ver & 0xff00) >> 8,
9306                  (bp->common.bc_ver & 0xff),
9307                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9308         strcpy(info->bus_info, pci_name(bp->pdev));
9309         info->n_stats = BNX2X_NUM_STATS;
9310         info->testinfo_len = BNX2X_NUM_TESTS;
9311         info->eedump_len = bp->common.flash_size;
9312         info->regdump_len = bnx2x_get_regs_len(dev);
9313 }
9314
9315 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9316 {
9317         struct bnx2x *bp = netdev_priv(dev);
9318
9319         if (bp->flags & NO_WOL_FLAG) {
9320                 wol->supported = 0;
9321                 wol->wolopts = 0;
9322         } else {
9323                 wol->supported = WAKE_MAGIC;
9324                 if (bp->wol)
9325                         wol->wolopts = WAKE_MAGIC;
9326                 else
9327                         wol->wolopts = 0;
9328         }
9329         memset(&wol->sopass, 0, sizeof(wol->sopass));
9330 }
9331
9332 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9333 {
9334         struct bnx2x *bp = netdev_priv(dev);
9335
9336         if (wol->wolopts & ~WAKE_MAGIC)
9337                 return -EINVAL;
9338
9339         if (wol->wolopts & WAKE_MAGIC) {
9340                 if (bp->flags & NO_WOL_FLAG)
9341                         return -EINVAL;
9342
9343                 bp->wol = 1;
9344         } else
9345                 bp->wol = 0;
9346
9347         return 0;
9348 }
9349
9350 static u32 bnx2x_get_msglevel(struct net_device *dev)
9351 {
9352         struct bnx2x *bp = netdev_priv(dev);
9353
9354         return bp->msglevel;
9355 }
9356
9357 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9358 {
9359         struct bnx2x *bp = netdev_priv(dev);
9360
9361         if (capable(CAP_NET_ADMIN))
9362                 bp->msglevel = level;
9363 }
9364
9365 static int bnx2x_nway_reset(struct net_device *dev)
9366 {
9367         struct bnx2x *bp = netdev_priv(dev);
9368
9369         if (!bp->port.pmf)
9370                 return 0;
9371
9372         if (netif_running(dev)) {
9373                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9374                 bnx2x_link_set(bp);
9375         }
9376
9377         return 0;
9378 }
9379
9380 static u32 bnx2x_get_link(struct net_device *dev)
9381 {
9382         struct bnx2x *bp = netdev_priv(dev);
9383
9384         if (bp->flags & MF_FUNC_DIS)
9385                 return 0;
9386
9387         return bp->link_vars.link_up;
9388 }
9389
9390 static int bnx2x_get_eeprom_len(struct net_device *dev)
9391 {
9392         struct bnx2x *bp = netdev_priv(dev);
9393
9394         return bp->common.flash_size;
9395 }
9396
9397 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9398 {
9399         int port = BP_PORT(bp);
9400         int count, i;
9401         u32 val = 0;
9402
9403         /* adjust timeout for emulation/FPGA */
9404         count = NVRAM_TIMEOUT_COUNT;
9405         if (CHIP_REV_IS_SLOW(bp))
9406                 count *= 100;
9407
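             /* NVRAM access is arbitrated per port: set this port's REQ bit
              * and poll until the arbiter grants the matching ARB bit
              */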
9408         /* request access to nvram interface */
9409         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9410                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9411
9412         for (i = 0; i < count*10; i++) {
9413                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9414                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9415                         break;
9416
9417                 udelay(5);
9418         }
9419
9420         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9421                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9422                 return -EBUSY;
9423         }
9424
9425         return 0;
9426 }
9427
9428 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9429 {
9430         int port = BP_PORT(bp);
9431         int count, i;
9432         u32 val = 0;
9433
9434         /* adjust timeout for emulation/FPGA */
9435         count = NVRAM_TIMEOUT_COUNT;
9436         if (CHIP_REV_IS_SLOW(bp))
9437                 count *= 100;
9438
9439         /* relinquish nvram interface */
9440         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9441                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9442
9443         for (i = 0; i < count*10; i++) {
9444                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9445                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9446                         break;
9447
9448                 udelay(5);
9449         }
9450
9451         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9452                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9453                 return -EBUSY;
9454         }
9455
9456         return 0;
9457 }
9458
9459 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9460 {
9461         u32 val;
9462
9463         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9464
9465         /* enable both bits, even on read */
9466         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9467                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9468                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9469 }
9470
9471 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9472 {
9473         u32 val;
9474
9475         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9476
9477         /* disable both bits, even after read */
9478         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9479                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9480                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9481 }
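
/* Usage sketch (illustrative only): every NVRAM accessor below wraps its
 * register traffic in this bracket; the per-port arbitration lock comes
 * first, then the access-enable bits, torn down in reverse order.
 */
static int __maybe_unused bnx2x_nvram_bracket_sketch(struct bnx2x *bp)
{
        int rc = bnx2x_acquire_nvram_lock(bp);

        if (rc)
                return rc;
        bnx2x_enable_nvram_access(bp);

        /* ... dword transfers via the MCP_REG_MCPR_NVM_* registers ... */

        bnx2x_disable_nvram_access(bp);
        return bnx2x_release_nvram_lock(bp);
}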
9482
9483 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9484                                   u32 cmd_flags)
9485 {
9486         int count, i, rc;
9487         u32 val;
9488
9489         /* build the command word */
9490         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9491
9492         /* need to clear DONE bit separately */
9493         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9494
9495         /* address of the NVRAM to read from */
9496         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9497                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9498
9499         /* issue a read command */
9500         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9501
9502         /* adjust timeout for emulation/FPGA */
9503         count = NVRAM_TIMEOUT_COUNT;
9504         if (CHIP_REV_IS_SLOW(bp))
9505                 count *= 100;
9506
9507         /* wait for completion */
9508         *ret_val = 0;
9509         rc = -EBUSY;
9510         for (i = 0; i < count; i++) {
9511                 udelay(5);
9512                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9513
9514                 if (val & MCPR_NVM_COMMAND_DONE) {
9515                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9516                         /* we read nvram data in cpu order, but
9517                          * ethtool sees it as an array of bytes;
9518                          * converting to big-endian does the work */
9519                         *ret_val = cpu_to_be32(val);
9520                         rc = 0;
9521                         break;
9522                 }
9523         }
9524
9525         return rc;
9526 }
9527
9528 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9529                             int buf_size)
9530 {
9531         int rc;
9532         u32 cmd_flags;
9533         __be32 val;
9534
9535         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9536                 DP(BNX2X_MSG_NVM,
9537                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9538                    offset, buf_size);
9539                 return -EINVAL;
9540         }
9541
9542         if (offset + buf_size > bp->common.flash_size) {
9543                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9544                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9545                    offset, buf_size, bp->common.flash_size);
9546                 return -EINVAL;
9547         }
9548
9549         /* request access to nvram interface */
9550         rc = bnx2x_acquire_nvram_lock(bp);
9551         if (rc)
9552                 return rc;
9553
9554         /* enable access to nvram interface */
9555         bnx2x_enable_nvram_access(bp);
9556
9557         /* read the leading dwords (all but the last) */
9558         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9559         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9560                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9561                 memcpy(ret_buf, &val, 4);
9562
9563                 /* advance to the next dword */
9564                 offset += sizeof(u32);
9565                 ret_buf += sizeof(u32);
9566                 buf_size -= sizeof(u32);
9567                 cmd_flags = 0;
9568         }
9569
9570         if (rc == 0) {
9571                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9572                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9573                 memcpy(ret_buf, &val, 4);
9574         }
9575
9576         /* disable access to nvram interface */
9577         bnx2x_disable_nvram_access(bp);
9578         bnx2x_release_nvram_lock(bp);
9579
9580         return rc;
9581 }
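
/* Minimal sketch (illustrative): reading the 4-byte bootstrap magic at
 * offset 0, exactly as bnx2x_test_nvram() does further down. The buffer
 * holds raw big-endian bytes, hence the be32_to_cpu().
 */
static int __maybe_unused bnx2x_nvram_magic_sketch(struct bnx2x *bp,
                                                   u32 *magic)
{
        __be32 raw;
        int rc = bnx2x_nvram_read(bp, 0, (u8 *)&raw, sizeof(raw));

        if (rc == 0)
                *magic = be32_to_cpu(raw);      /* expect 0x669955aa */
        return rc;
}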
9582
9583 static int bnx2x_get_eeprom(struct net_device *dev,
9584                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9585 {
9586         struct bnx2x *bp = netdev_priv(dev);
9587         int rc;
9588
9589         if (!netif_running(dev))
9590                 return -EAGAIN;
9591
9592         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9593            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9594            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9595            eeprom->len, eeprom->len);
9596
9597         /* parameters already validated in ethtool_get_eeprom */
9598
9599         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9600
9601         return rc;
9602 }
9603
9604 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9605                                    u32 cmd_flags)
9606 {
9607         int count, i, rc;
9608
9609         /* build the command word */
9610         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9611
9612         /* need to clear DONE bit separately */
9613         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9614
9615         /* write the data */
9616         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9617
9618         /* address of the NVRAM to write to */
9619         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9620                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9621
9622         /* issue the write command */
9623         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9624
9625         /* adjust timeout for emulation/FPGA */
9626         count = NVRAM_TIMEOUT_COUNT;
9627         if (CHIP_REV_IS_SLOW(bp))
9628                 count *= 100;
9629
9630         /* wait for completion */
9631         rc = -EBUSY;
9632         for (i = 0; i < count; i++) {
9633                 udelay(5);
9634                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9635                 if (val & MCPR_NVM_COMMAND_DONE) {
9636                         rc = 0;
9637                         break;
9638                 }
9639         }
9640
9641         return rc;
9642 }
9643
9644 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
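/* e.g. BYTE_OFFSET(5) == 8: the bit position of byte 1 within its aligned dword */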
9645
9646 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9647                               int buf_size)
9648 {
9649         int rc;
9650         u32 cmd_flags;
9651         u32 align_offset;
9652         __be32 val;
9653
9654         if (offset + buf_size > bp->common.flash_size) {
9655                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9656                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9657                    offset, buf_size, bp->common.flash_size);
9658                 return -EINVAL;
9659         }
9660
9661         /* request access to nvram interface */
9662         rc = bnx2x_acquire_nvram_lock(bp);
9663         if (rc)
9664                 return rc;
9665
9666         /* enable access to nvram interface */
9667         bnx2x_enable_nvram_access(bp);
9668
9669         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9670         align_offset = (offset & ~0x03);
9671         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9672
9673         if (rc == 0) {
9674                 val &= ~(0xff << BYTE_OFFSET(offset));
9675                 val |= (*data_buf << BYTE_OFFSET(offset));
9676
9677                 /* nvram data is returned as an array of bytes;
9678                  * convert it back to cpu order before writing */
9679                 val = be32_to_cpu(val);
9680
9681                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9682                                              cmd_flags);
9683         }
9684
9685         /* disable access to nvram interface */
9686         bnx2x_disable_nvram_access(bp);
9687         bnx2x_release_nvram_lock(bp);
9688
9689         return rc;
9690 }
9691
9692 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9693                              int buf_size)
9694 {
9695         int rc;
9696         u32 cmd_flags;
9697         u32 val;
9698         u32 written_so_far;
9699
9700         if (buf_size == 1)      /* single-byte write from ethtool */
9701                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9702
9703         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9704                 DP(BNX2X_MSG_NVM,
9705                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9706                    offset, buf_size);
9707                 return -EINVAL;
9708         }
9709
9710         if (offset + buf_size > bp->common.flash_size) {
9711                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9712                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9713                    offset, buf_size, bp->common.flash_size);
9714                 return -EINVAL;
9715         }
9716
9717         /* request access to nvram interface */
9718         rc = bnx2x_acquire_nvram_lock(bp);
9719         if (rc)
9720                 return rc;
9721
9722         /* enable access to nvram interface */
9723         bnx2x_enable_nvram_access(bp);
9724
9725         written_so_far = 0;
9726         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9727         while ((written_so_far < buf_size) && (rc == 0)) {
9728                 if (written_so_far == (buf_size - sizeof(u32)))
9729                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9730                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9731                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9732                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9733                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9734
9735                 memcpy(&val, data_buf, 4);
9736
9737                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9738
9739                 /* advance to the next dword */
9740                 offset += sizeof(u32);
9741                 data_buf += sizeof(u32);
9742                 written_so_far += sizeof(u32);
9743                 cmd_flags = 0;
9744         }
9745
9746         /* disable access to nvram interface */
9747         bnx2x_disable_nvram_access(bp);
9748         bnx2x_release_nvram_lock(bp);
9749
9750         return rc;
9751 }
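
/* Illustrative sketch: rewriting a single aligned dword's worth of raw
 * bytes. offset and length must be 4-byte multiples on this path;
 * one-byte updates are routed to bnx2x_nvram_write1() above instead.
 */
static int __maybe_unused bnx2x_nvram_put_dword_sketch(struct bnx2x *bp,
                                                       u32 offset, u8 *bytes)
{
        /* 'bytes' points at the 4 raw bytes to land at 'offset' */
        return bnx2x_nvram_write(bp, offset, bytes, 4);
}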
9752
9753 static int bnx2x_set_eeprom(struct net_device *dev,
9754                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9755 {
9756         struct bnx2x *bp = netdev_priv(dev);
9757         int port = BP_PORT(bp);
9758         int rc = 0;
9759
9760         if (!netif_running(dev))
9761                 return -EAGAIN;
9762
9763         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9764            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9765            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9766            eeprom->len, eeprom->len);
9767
9768         /* parameters already validated in ethtool_set_eeprom */
9769
9770         /* PHY eeprom can be accessed only by the PMF */
9771         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9772             !bp->port.pmf)
9773                 return -EINVAL;
9774
9775         if (eeprom->magic == 0x50485950) {
9776                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9777                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9778
9779                 bnx2x_acquire_phy_lock(bp);
9780                 rc |= bnx2x_link_reset(&bp->link_params,
9781                                        &bp->link_vars, 0);
9782                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9783                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9784                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9785                                        MISC_REGISTERS_GPIO_HIGH, port);
9786                 bnx2x_release_phy_lock(bp);
9787                 bnx2x_link_report(bp);
9788
9789         } else if (eeprom->magic == 0x50485952) {
9790                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9791                 if (bp->state == BNX2X_STATE_OPEN) {
9792                         bnx2x_acquire_phy_lock(bp);
9793                         rc |= bnx2x_link_reset(&bp->link_params,
9794                                                &bp->link_vars, 1);
9795
9796                         rc |= bnx2x_phy_init(&bp->link_params,
9797                                              &bp->link_vars);
9798                         bnx2x_release_phy_lock(bp);
9799                         bnx2x_calc_fc_adv(bp);
9800                 }
9801         } else if (eeprom->magic == 0x50485943) {
9802                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9803                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9804                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9805                         u8 ext_phy_addr =
9806                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9807
9808                         /* take the DSP out of download mode */
9809                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9810                                        MISC_REGISTERS_GPIO_LOW, port);
9811
9812                         bnx2x_acquire_phy_lock(bp);
9813
9814                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9815
9816                         /* wait 0.5 sec to allow it to run */
9817                         msleep(500);
9818                         bnx2x_ext_phy_hw_reset(bp, port);
9819                         msleep(500);
9820                         bnx2x_release_phy_lock(bp);
9821                 }
9822         } else
9823                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9824
9825         return rc;
9826 }
9827
9828 static int bnx2x_get_coalesce(struct net_device *dev,
9829                               struct ethtool_coalesce *coal)
9830 {
9831         struct bnx2x *bp = netdev_priv(dev);
9832
9833         memset(coal, 0, sizeof(struct ethtool_coalesce));
9834
9835         coal->rx_coalesce_usecs = bp->rx_ticks;
9836         coal->tx_coalesce_usecs = bp->tx_ticks;
9837
9838         return 0;
9839 }
9840
9841 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
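/* 0xf0 * 12 == 2880us; larger requests are clamped to this below */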
9842 static int bnx2x_set_coalesce(struct net_device *dev,
9843                               struct ethtool_coalesce *coal)
9844 {
9845         struct bnx2x *bp = netdev_priv(dev);
9846
9847         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9848         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9849                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9850
9851         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9852         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9853                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9854
9855         if (netif_running(dev))
9856                 bnx2x_update_coalesce(bp);
9857
9858         return 0;
9859 }
9860
9861 static void bnx2x_get_ringparam(struct net_device *dev,
9862                                 struct ethtool_ringparam *ering)
9863 {
9864         struct bnx2x *bp = netdev_priv(dev);
9865
9866         ering->rx_max_pending = MAX_RX_AVAIL;
9867         ering->rx_mini_max_pending = 0;
9868         ering->rx_jumbo_max_pending = 0;
9869
9870         ering->rx_pending = bp->rx_ring_size;
9871         ering->rx_mini_pending = 0;
9872         ering->rx_jumbo_pending = 0;
9873
9874         ering->tx_max_pending = MAX_TX_AVAIL;
9875         ering->tx_pending = bp->tx_ring_size;
9876 }
9877
9878 static int bnx2x_set_ringparam(struct net_device *dev,
9879                                struct ethtool_ringparam *ering)
9880 {
9881         struct bnx2x *bp = netdev_priv(dev);
9882         int rc = 0;
9883
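        /* the Tx ring must hold at least one maximally-fragmented skb
         * plus its start/parsing BDs, hence the MAX_SKB_FRAGS + 4 floor
         */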
9884         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9885             (ering->tx_pending > MAX_TX_AVAIL) ||
9886             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9887                 return -EINVAL;
9888
9889         bp->rx_ring_size = ering->rx_pending;
9890         bp->tx_ring_size = ering->tx_pending;
9891
9892         if (netif_running(dev)) {
9893                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9894                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9895         }
9896
9897         return rc;
9898 }
9899
9900 static void bnx2x_get_pauseparam(struct net_device *dev,
9901                                  struct ethtool_pauseparam *epause)
9902 {
9903         struct bnx2x *bp = netdev_priv(dev);
9904
9905         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9906                            BNX2X_FLOW_CTRL_AUTO) &&
9907                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9908
9909         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9910                             BNX2X_FLOW_CTRL_RX);
9911         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9912                             BNX2X_FLOW_CTRL_TX);
9913
9914         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9915            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9916            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9917 }
9918
9919 static int bnx2x_set_pauseparam(struct net_device *dev,
9920                                 struct ethtool_pauseparam *epause)
9921 {
9922         struct bnx2x *bp = netdev_priv(dev);
9923
9924         if (IS_E1HMF(bp))
9925                 return 0;
9926
9927         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9928            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9929            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9930
9931         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9932
9933         if (epause->rx_pause)
9934                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9935
9936         if (epause->tx_pause)
9937                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9938
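        /* neither pause bit was ORed in above, so the initial AUTO now
         * means "nothing was requested"; map it to NONE
         */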
9939         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9940                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9941
9942         if (epause->autoneg) {
9943                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9944                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9945                         return -EINVAL;
9946                 }
9947
9948                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9949                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9950         }
9951
9952         DP(NETIF_MSG_LINK,
9953            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9954
9955         if (netif_running(dev)) {
9956                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9957                 bnx2x_link_set(bp);
9958         }
9959
9960         return 0;
9961 }
9962
9963 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9964 {
9965         struct bnx2x *bp = netdev_priv(dev);
9966         int changed = 0;
9967         int rc = 0;
9968
9969         /* TPA requires Rx CSUM offloading */
9970         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9971                 if (!disable_tpa) {
9972                         if (!(dev->features & NETIF_F_LRO)) {
9973                                 dev->features |= NETIF_F_LRO;
9974                                 bp->flags |= TPA_ENABLE_FLAG;
9975                                 changed = 1;
9976                         }
9977                 } else
9978                         rc = -EINVAL;
9979         } else if (dev->features & NETIF_F_LRO) {
9980                 dev->features &= ~NETIF_F_LRO;
9981                 bp->flags &= ~TPA_ENABLE_FLAG;
9982                 changed = 1;
9983         }
9984
9985         if (changed && netif_running(dev)) {
9986                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9987                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9988         }
9989
9990         return rc;
9991 }
9992
9993 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9994 {
9995         struct bnx2x *bp = netdev_priv(dev);
9996
9997         return bp->rx_csum;
9998 }
9999
10000 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10001 {
10002         struct bnx2x *bp = netdev_priv(dev);
10003         int rc = 0;
10004
10005         bp->rx_csum = data;
10006
10007         /* Disable TPA when Rx CSUM is disabled; otherwise all
10008            TPA-aggregated packets would be discarded due to a wrong TCP CSUM */
10009         if (!data) {
10010                 u32 flags = ethtool_op_get_flags(dev);
10011
10012                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10013         }
10014
10015         return rc;
10016 }
10017
10018 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10019 {
10020         if (data) {
10021                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10022                 dev->features |= NETIF_F_TSO6;
10023         } else {
10024                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10025                 dev->features &= ~NETIF_F_TSO6;
10026         }
10027
10028         return 0;
10029 }
10030
10031 static const struct {
10032         char string[ETH_GSTRING_LEN];
10033 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10034         { "register_test (offline)" },
10035         { "memory_test (offline)" },
10036         { "loopback_test (offline)" },
10037         { "nvram_test (online)" },
10038         { "interrupt_test (online)" },
10039         { "link_test (online)" },
10040         { "idle check (online)" }
10041 };
10042
10043 static int bnx2x_test_registers(struct bnx2x *bp)
10044 {
10045         int idx, i, rc = -ENODEV;
10046         u32 wr_val = 0;
10047         int port = BP_PORT(bp);
10048         static const struct {
10049                 u32  offset0;
10050                 u32  offset1;
10051                 u32  mask;
10052         } reg_tbl[] = {
10053 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10054                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10055                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10056                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10057                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10058                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10059                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10060                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10061                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10062                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10063 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10064                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10065                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10066                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10067                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10068                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10069                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10070                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10071                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10072                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10073 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10074                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10075                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10076                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10077                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10078                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10079                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10080                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10081                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10082                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10083 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10084                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10085                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10086                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10087                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10088                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10089                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10090
10091                 { 0xffffffff, 0, 0x00000000 }
10092         };
10093
10094         if (!netif_running(bp->dev))
10095                 return rc;
10096
10097         /* Run the test twice:
10098            first writing 0x00000000, then writing 0xffffffff */
10099         for (idx = 0; idx < 2; idx++) {
10100
10101                 switch (idx) {
10102                 case 0:
10103                         wr_val = 0;
10104                         break;
10105                 case 1:
10106                         wr_val = 0xffffffff;
10107                         break;
10108                 }
10109
10110                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10111                         u32 offset, mask, save_val, val;
10112
10113                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10114                         mask = reg_tbl[i].mask;
10115
10116                         save_val = REG_RD(bp, offset);
10117
10118                         REG_WR(bp, offset, wr_val);
10119                         val = REG_RD(bp, offset);
10120
10121                         /* Restore the original register's value */
10122                         REG_WR(bp, offset, save_val);
10123
10124                         /* verify the read-back value matches the written one (under the mask) */
10125                         if ((val & mask) != (wr_val & mask))
10126                                 goto test_reg_exit;
10127                 }
10128         }
10129
10130         rc = 0;
10131
10132 test_reg_exit:
10133         return rc;
10134 }
10135
10136 static int bnx2x_test_memory(struct bnx2x *bp)
10137 {
10138         int i, j, rc = -ENODEV;
10139         u32 val;
10140         static const struct {
10141                 u32 offset;
10142                 int size;
10143         } mem_tbl[] = {
10144                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10145                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10146                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10147                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10148                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10149                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10150                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10151
10152                 { 0xffffffff, 0 }
10153         };
10154         static const struct {
10155                 char *name;
10156                 u32 offset;
10157                 u32 e1_mask;
10158                 u32 e1h_mask;
10159         } prty_tbl[] = {
10160                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10161                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10162                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10163                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10164                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10165                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10166
10167                 { NULL, 0xffffffff, 0, 0 }
10168         };
10169
10170         if (!netif_running(bp->dev))
10171                 return rc;
10172
10173         /* Go through all the memories */
10174         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10175                 for (j = 0; j < mem_tbl[i].size; j++)
10176                         REG_RD(bp, mem_tbl[i].offset + j*4);
10177
10178         /* Check the parity status */
10179         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10180                 val = REG_RD(bp, prty_tbl[i].offset);
10181                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10182                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10183                         DP(NETIF_MSG_HW,
10184                            "%s is 0x%x\n", prty_tbl[i].name, val);
10185                         goto test_mem_exit;
10186                 }
10187         }
10188
10189         rc = 0;
10190
10191 test_mem_exit:
10192         return rc;
10193 }
10194
10195 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10196 {
10197         int cnt = 1000;
10198
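        /* poll up to 1000 * 10ms = ~10 seconds for the link to settle */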
10199         if (link_up)
10200                 while (bnx2x_link_test(bp) && cnt--)
10201                         msleep(10);
10202 }
10203
10204 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10205 {
10206         unsigned int pkt_size, num_pkts, i;
10207         struct sk_buff *skb;
10208         unsigned char *packet;
10209         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10210         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10211         u16 tx_start_idx, tx_idx;
10212         u16 rx_start_idx, rx_idx;
10213         u16 pkt_prod, bd_prod;
10214         struct sw_tx_bd *tx_buf;
10215         struct eth_tx_start_bd *tx_start_bd;
10216         struct eth_tx_parse_bd *pbd = NULL;
10217         dma_addr_t mapping;
10218         union eth_rx_cqe *cqe;
10219         u8 cqe_fp_flags;
10220         struct sw_rx_bd *rx_buf;
10221         u16 len;
10222         int rc = -ENODEV;
10223
10224         /* check the loopback mode */
10225         switch (loopback_mode) {
10226         case BNX2X_PHY_LOOPBACK:
10227                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10228                         return -EINVAL;
10229                 break;
10230         case BNX2X_MAC_LOOPBACK:
10231                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10232                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10233                 break;
10234         default:
10235                 return -EINVAL;
10236         }
10237
10238         /* prepare the loopback packet */
10239         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10240                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10241         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10242         if (!skb) {
10243                 rc = -ENOMEM;
10244                 goto test_loopback_exit;
10245         }
10246         packet = skb_put(skb, pkt_size);
10247         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10248         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10249         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10250         for (i = ETH_HLEN; i < pkt_size; i++)
10251                 packet[i] = (unsigned char) (i & 0xff);
10252
10253         /* send the loopback packet */
10254         num_pkts = 0;
10255         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10256         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10257
10258         pkt_prod = fp_tx->tx_pkt_prod++;
10259         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10260         tx_buf->first_bd = fp_tx->tx_bd_prod;
10261         tx_buf->skb = skb;
10262         tx_buf->flags = 0;
10263
10264         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10265         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10266         mapping = pci_map_single(bp->pdev, skb->data,
10267                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10268         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10269         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10270         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10271         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10272         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10273         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10274         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10275                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10276
10277         /* turn on parsing and get a BD */
10278         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10279         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10280
10281         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10282
10283         wmb();
10284
10285         fp_tx->tx_db.data.prod += 2;
10286         barrier();
10287         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10288
10289         mmiowb();
10290
10291         num_pkts++;
10292         fp_tx->tx_bd_prod += 2; /* start + pbd */
10293
10294         udelay(100);
10295
10296         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10297         if (tx_idx != tx_start_idx + num_pkts)
10298                 goto test_loopback_exit;
10299
10300         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10301         if (rx_idx != rx_start_idx + num_pkts)
10302                 goto test_loopback_exit;
10303
10304         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10305         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
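        /* note: ETH_RX_ERROR_FALGS is the macro's actual (misspelled)
         * name in the driver headers and is kept as-is here
         */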
10306         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10307                 goto test_loopback_rx_exit;
10308
10309         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10310         if (len != pkt_size)
10311                 goto test_loopback_rx_exit;
10312
10313         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10314         skb = rx_buf->skb;
10315         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10316         for (i = ETH_HLEN; i < pkt_size; i++)
10317                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10318                         goto test_loopback_rx_exit;
10319
10320         rc = 0;
10321
10322 test_loopback_rx_exit:
10323
10324         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10325         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10326         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10327         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10328
10329         /* Update producers */
10330         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10331                              fp_rx->rx_sge_prod);
10332
10333 test_loopback_exit:
10334         bp->link_params.loopback_mode = LOOPBACK_NONE;
10335
10336         return rc;
10337 }
10338
10339 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10340 {
10341         int rc = 0, res;
10342
10343         if (!netif_running(bp->dev))
10344                 return BNX2X_LOOPBACK_FAILED;
10345
10346         bnx2x_netif_stop(bp, 1);
10347         bnx2x_acquire_phy_lock(bp);
10348
10349         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10350         if (res) {
10351                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10352                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10353         }
10354
10355         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10356         if (res) {
10357                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10358                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10359         }
10360
10361         bnx2x_release_phy_lock(bp);
10362         bnx2x_netif_start(bp);
10363
10364         return rc;
10365 }
10366
10367 #define CRC32_RESIDUAL                  0xdebb20e3
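/* standard CRC-32 residue: the CRC of any block that embeds its own
 * little-endian CRC-32 at the end evaluates to this constant
 */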
10368
10369 static int bnx2x_test_nvram(struct bnx2x *bp)
10370 {
10371         static const struct {
10372                 int offset;
10373                 int size;
10374         } nvram_tbl[] = {
10375                 {     0,  0x14 }, /* bootstrap */
10376                 {  0x14,  0xec }, /* dir */
10377                 { 0x100, 0x350 }, /* manuf_info */
10378                 { 0x450,  0xf0 }, /* feature_info */
10379                 { 0x640,  0x64 }, /* upgrade_key_info */
10380                 { 0x6a4,  0x64 },
10381                 { 0x708,  0x70 }, /* manuf_key_info */
10382                 { 0x778,  0x70 },
10383                 {     0,     0 }
10384         };
10385         __be32 buf[0x350 / 4];
10386         u8 *data = (u8 *)buf;
10387         int i, rc;
10388         u32 magic, crc;
10389
10390         rc = bnx2x_nvram_read(bp, 0, data, 4);
10391         if (rc) {
10392                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10393                 goto test_nvram_exit;
10394         }
10395
10396         magic = be32_to_cpu(buf[0]);
10397         if (magic != 0x669955aa) {
10398                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10399                 rc = -ENODEV;
10400                 goto test_nvram_exit;
10401         }
10402
10403         for (i = 0; nvram_tbl[i].size; i++) {
10404
10405                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10406                                       nvram_tbl[i].size);
10407                 if (rc) {
10408                         DP(NETIF_MSG_PROBE,
10409                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10410                         goto test_nvram_exit;
10411                 }
10412
10413                 crc = ether_crc_le(nvram_tbl[i].size, data);
10414                 if (crc != CRC32_RESIDUAL) {
10415                         DP(NETIF_MSG_PROBE,
10416                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10417                         rc = -ENODEV;
10418                         goto test_nvram_exit;
10419                 }
10420         }
10421
10422 test_nvram_exit:
10423         return rc;
10424 }
10425
10426 static int bnx2x_test_intr(struct bnx2x *bp)
10427 {
10428         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10429         int i, rc;
10430
10431         if (!netif_running(bp->dev))
10432                 return -ENODEV;
10433
10434         config->hdr.length = 0;
10435         if (CHIP_IS_E1(bp))
10436                 /* use last unicast entries */
10437                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
10438         else
10439                 config->hdr.offset = BP_FUNC(bp);
10440         config->hdr.client_id = bp->fp->cl_id;
10441         config->hdr.reserved1 = 0;
10442
10443         bp->set_mac_pending++;
10444         smp_wmb();
10445         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10446                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10447                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10448         if (rc == 0) {
10449                 for (i = 0; i < 10; i++) {
10450                         if (!bp->set_mac_pending)
10451                                 break;
10452                         smp_rmb();
10453                         msleep_interruptible(10);
10454                 }
10455                 if (i == 10)
10456                         rc = -ENODEV;
10457         }
10458
10459         return rc;
10460 }
10461
10462 static void bnx2x_self_test(struct net_device *dev,
10463                             struct ethtool_test *etest, u64 *buf)
10464 {
10465         struct bnx2x *bp = netdev_priv(dev);
10466
10467         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
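        /* buf[] indices follow bnx2x_tests_str_arr above: 0 registers,
         * 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link
         */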
10468
10469         if (!netif_running(dev))
10470                 return;
10471
10472         /* offline tests are not supported in MF mode */
10473         if (IS_E1HMF(bp))
10474                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10475
10476         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10477                 int port = BP_PORT(bp);
10478                 u32 val;
10479                 u8 link_up;
10480
10481                 /* save current value of input enable for TX port IF */
10482                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10483                 /* disable input for TX port IF */
10484                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10485
10486                 link_up = (bnx2x_link_test(bp) == 0);
10487                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10488                 bnx2x_nic_load(bp, LOAD_DIAG);
10489                 /* wait until link state is restored */
10490                 bnx2x_wait_for_link(bp, link_up);
10491
10492                 if (bnx2x_test_registers(bp) != 0) {
10493                         buf[0] = 1;
10494                         etest->flags |= ETH_TEST_FL_FAILED;
10495                 }
10496                 if (bnx2x_test_memory(bp) != 0) {
10497                         buf[1] = 1;
10498                         etest->flags |= ETH_TEST_FL_FAILED;
10499                 }
10500                 buf[2] = bnx2x_test_loopback(bp, link_up);
10501                 if (buf[2] != 0)
10502                         etest->flags |= ETH_TEST_FL_FAILED;
10503
10504                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10505
10506                 /* restore input for TX port IF */
10507                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10508
10509                 bnx2x_nic_load(bp, LOAD_NORMAL);
10510                 /* wait until link state is restored */
10511                 bnx2x_wait_for_link(bp, link_up);
10512         }
10513         if (bnx2x_test_nvram(bp) != 0) {
10514                 buf[3] = 1;
10515                 etest->flags |= ETH_TEST_FL_FAILED;
10516         }
10517         if (bnx2x_test_intr(bp) != 0) {
10518                 buf[4] = 1;
10519                 etest->flags |= ETH_TEST_FL_FAILED;
10520         }
10521         if (bp->port.pmf)
10522                 if (bnx2x_link_test(bp) != 0) {
10523                         buf[5] = 1;
10524                         etest->flags |= ETH_TEST_FL_FAILED;
10525                 }
10526
10527 #ifdef BNX2X_EXTRA_DEBUG
10528         bnx2x_panic_dump(bp);
10529 #endif
10530 }
10531
10532 static const struct {
10533         long offset;
10534         int size;
10535         u8 string[ETH_GSTRING_LEN];
10536 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10537 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10538         { Q_STATS_OFFSET32(error_bytes_received_hi),
10539                                                 8, "[%d]: rx_error_bytes" },
10540         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10541                                                 8, "[%d]: rx_ucast_packets" },
10542         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10543                                                 8, "[%d]: rx_mcast_packets" },
10544         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10545                                                 8, "[%d]: rx_bcast_packets" },
10546         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10547         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10548                                          4, "[%d]: rx_phy_ip_err_discards"},
10549         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10550                                          4, "[%d]: rx_skb_alloc_discard" },
10551         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10552
10553 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10554         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10555                                                         8, "[%d]: tx_packets" }
10556 };
10557
10558 static const struct {
10559         long offset;
10560         int size;
10561         u32 flags;
10562 #define STATS_FLAGS_PORT                1
10563 #define STATS_FLAGS_FUNC                2
10564 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10565         u8 string[ETH_GSTRING_LEN];
10566 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10567 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10568                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10569         { STATS_OFFSET32(error_bytes_received_hi),
10570                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10571         { STATS_OFFSET32(total_unicast_packets_received_hi),
10572                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10573         { STATS_OFFSET32(total_multicast_packets_received_hi),
10574                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10575         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10576                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10577         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10578                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10579         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10580                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10581         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10582                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10583         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10584                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10585 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10586                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10587         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10588                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10589         { STATS_OFFSET32(no_buff_discard_hi),
10590                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10591         { STATS_OFFSET32(mac_filter_discard),
10592                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10593         { STATS_OFFSET32(xxoverflow_discard),
10594                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10595         { STATS_OFFSET32(brb_drop_hi),
10596                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10597         { STATS_OFFSET32(brb_truncate_hi),
10598                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10599         { STATS_OFFSET32(pause_frames_received_hi),
10600                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10601         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10602                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10603         { STATS_OFFSET32(nig_timer_max),
10604                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10605 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10606                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10607         { STATS_OFFSET32(rx_skb_alloc_failed),
10608                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10609         { STATS_OFFSET32(hw_csum_err),
10610                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10611
10612         { STATS_OFFSET32(total_bytes_transmitted_hi),
10613                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10614         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10615                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10616         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10617                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10618         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10619                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10620         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10621                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10622         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10623                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10624         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10625                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10626 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10627                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10628         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10629                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10630         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10631                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10632         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10633                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10634         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10635                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10636         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10637                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10638         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10639                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10640         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10641                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10642         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10643                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10644         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10645                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10646 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10647                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10648         { STATS_OFFSET32(pause_frames_sent_hi),
10649                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10650 };
10651
10652 #define IS_PORT_STAT(i) \
10653         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10654 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10655 #define IS_E1HMF_MODE_STAT(bp) \
10656                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10657
10658 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10659 {
10660         struct bnx2x *bp = netdev_priv(dev);
10661         int i, num_stats;
10662
10663         switch (stringset) {
10664         case ETH_SS_STATS:
10665                 if (is_multi(bp)) {
10666                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10667                         if (!IS_E1HMF_MODE_STAT(bp))
10668                                 num_stats += BNX2X_NUM_STATS;
10669                 } else {
10670                         if (IS_E1HMF_MODE_STAT(bp)) {
10671                                 num_stats = 0;
10672                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10673                                         if (IS_FUNC_STAT(i))
10674                                                 num_stats++;
10675                         } else
10676                                 num_stats = BNX2X_NUM_STATS;
10677                 }
10678                 return num_stats;
10679
10680         case ETH_SS_TEST:
10681                 return BNX2X_NUM_TESTS;
10682
10683         default:
10684                 return -EINVAL;
10685         }
10686 }
10687
10688 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10689 {
10690         struct bnx2x *bp = netdev_priv(dev);
10691         int i, j, k;
10692
10693         switch (stringset) {
10694         case ETH_SS_STATS:
10695                 if (is_multi(bp)) {
10696                         k = 0;
10697                         for_each_queue(bp, i) {
10698                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10699                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10700                                                 bnx2x_q_stats_arr[j].string, i);
10701                                 k += BNX2X_NUM_Q_STATS;
10702                         }
10703                         if (IS_E1HMF_MODE_STAT(bp))
10704                                 break;
10705                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10706                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10707                                        bnx2x_stats_arr[j].string);
10708                 } else {
10709                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10710                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10711                                         continue;
10712                                 strcpy(buf + j*ETH_GSTRING_LEN,
10713                                        bnx2x_stats_arr[i].string);
10714                                 j++;
10715                         }
10716                 }
10717                 break;
10718
10719         case ETH_SS_TEST:
10720                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10721                 break;
10722         }
10723 }
10724
10725 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10726                                     struct ethtool_stats *stats, u64 *buf)
10727 {
10728         struct bnx2x *bp = netdev_priv(dev);
10729         u32 *hw_stats, *offset;
10730         int i, j, k;
10731
10732         if (is_multi(bp)) {
10733                 k = 0;
10734                 for_each_queue(bp, i) {
10735                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10736                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10737                                 if (bnx2x_q_stats_arr[j].size == 0) {
10738                                         /* skip this counter */
10739                                         buf[k + j] = 0;
10740                                         continue;
10741                                 }
10742                                 offset = (hw_stats +
10743                                           bnx2x_q_stats_arr[j].offset);
10744                                 if (bnx2x_q_stats_arr[j].size == 4) {
10745                                         /* 4-byte counter */
10746                                         buf[k + j] = (u64) *offset;
10747                                         continue;
10748                                 }
10749                                 /* 8-byte counter */
10750                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10751                         }
10752                         k += BNX2X_NUM_Q_STATS;
10753                 }
10754                 if (IS_E1HMF_MODE_STAT(bp))
10755                         return;
10756                 hw_stats = (u32 *)&bp->eth_stats;
10757                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10758                         if (bnx2x_stats_arr[j].size == 0) {
10759                                 /* skip this counter */
10760                                 buf[k + j] = 0;
10761                                 continue;
10762                         }
10763                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10764                         if (bnx2x_stats_arr[j].size == 4) {
10765                                 /* 4-byte counter */
10766                                 buf[k + j] = (u64) *offset;
10767                                 continue;
10768                         }
10769                         /* 8-byte counter */
10770                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10771                 }
10772         } else {
10773                 hw_stats = (u32 *)&bp->eth_stats;
10774                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10775                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10776                                 continue;
10777                         if (bnx2x_stats_arr[i].size == 0) {
10778                                 /* skip this counter */
10779                                 buf[j] = 0;
10780                                 j++;
10781                                 continue;
10782                         }
10783                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10784                         if (bnx2x_stats_arr[i].size == 4) {
10785                                 /* 4-byte counter */
10786                                 buf[j] = (u64) *offset;
10787                                 j++;
10788                                 continue;
10789                         }
10790                         /* 8-byte counter */
10791                         buf[j] = HILO_U64(*offset, *(offset + 1));
10792                         j++;
10793                 }
10794         }
10795 }
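
/* Worked example of the 64-bit counter recombination above: HILO_U64() is
 * essentially ((u64)hi << 32) + lo, so a counter stored as the two
 * consecutive u32s hi = 0x00000001, lo = 0x00000002 is exported as
 * 0x0000000100000002. A .size of 0 marks a placeholder stat that is
 * reported as 0, and a .size of 4 is a plain 32-bit counter.
 */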
10796
10797 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10798 {
10799         struct bnx2x *bp = netdev_priv(dev);
10800         int i;
10801
10802         if (!netif_running(dev))
10803                 return 0;
10804
10805         if (!bp->port.pmf)
10806                 return 0;
10807
10808         if (data == 0)
10809                 data = 2;
10810
10811         for (i = 0; i < (data * 2); i++) {
10812                 if ((i % 2) == 0)
10813                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10814                                       SPEED_1000);
10815                 else
10816                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10817
10818                 msleep_interruptible(500);
10819                 if (signal_pending(current))
10820                         break;
10821         }
10822
10823         if (bp->link_vars.link_up)
10824                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10825                               bp->link_vars.line_speed);
10826
10827         return 0;
10828 }
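
/* ethtool LED-blink semantics as implemented above: 'data' is the blink
 * duration in seconds (defaulting to 2 when userspace passes 0); the loop
 * runs data * 2 half-periods, toggling the LED every 500 ms, and finally
 * restores the operational LED state if the link is up.
 */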
10829
10830 static const struct ethtool_ops bnx2x_ethtool_ops = {
10831         .get_settings           = bnx2x_get_settings,
10832         .set_settings           = bnx2x_set_settings,
10833         .get_drvinfo            = bnx2x_get_drvinfo,
10834         .get_regs_len           = bnx2x_get_regs_len,
10835         .get_regs               = bnx2x_get_regs,
10836         .get_wol                = bnx2x_get_wol,
10837         .set_wol                = bnx2x_set_wol,
10838         .get_msglevel           = bnx2x_get_msglevel,
10839         .set_msglevel           = bnx2x_set_msglevel,
10840         .nway_reset             = bnx2x_nway_reset,
10841         .get_link               = bnx2x_get_link,
10842         .get_eeprom_len         = bnx2x_get_eeprom_len,
10843         .get_eeprom             = bnx2x_get_eeprom,
10844         .set_eeprom             = bnx2x_set_eeprom,
10845         .get_coalesce           = bnx2x_get_coalesce,
10846         .set_coalesce           = bnx2x_set_coalesce,
10847         .get_ringparam          = bnx2x_get_ringparam,
10848         .set_ringparam          = bnx2x_set_ringparam,
10849         .get_pauseparam         = bnx2x_get_pauseparam,
10850         .set_pauseparam         = bnx2x_set_pauseparam,
10851         .get_rx_csum            = bnx2x_get_rx_csum,
10852         .set_rx_csum            = bnx2x_set_rx_csum,
10853         .get_tx_csum            = ethtool_op_get_tx_csum,
10854         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10855         .set_flags              = bnx2x_set_flags,
10856         .get_flags              = ethtool_op_get_flags,
10857         .get_sg                 = ethtool_op_get_sg,
10858         .set_sg                 = ethtool_op_set_sg,
10859         .get_tso                = ethtool_op_get_tso,
10860         .set_tso                = bnx2x_set_tso,
10861         .self_test              = bnx2x_self_test,
10862         .get_sset_count         = bnx2x_get_sset_count,
10863         .get_strings            = bnx2x_get_strings,
10864         .phys_id                = bnx2x_phys_id,
10865         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10866 };
10867
10868 /* end of ethtool_ops */
10869
10870 /****************************************************************************
10871 * General service functions
10872 ****************************************************************************/
10873
10874 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10875 {
10876         u16 pmcsr;
10877
10878         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10879
10880         switch (state) {
10881         case PCI_D0:
10882                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10883                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10884                                        PCI_PM_CTRL_PME_STATUS));
10885
10886                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10887                         /* delay required during transition out of D3hot */
10888                         msleep(20);
10889                 break;
10890
10891         case PCI_D3hot:
10892                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10893                 pmcsr |= 3;
10894
10895                 if (bp->wol)
10896                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10897
10898                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10899                                       pmcsr);
10900
10901                 /* No more memory access after this point until
10902                  * device is brought back to D0.
10903                  */
10904                 break;
10905
10906         default:
10907                 return -EINVAL;
10908         }
10909         return 0;
10910 }
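
/* PM register note: PCI_PM_CTRL_STATE_MASK covers the two low PMCSR bits,
 * where 0 = D0 and 3 = D3hot (hence the 'pmcsr |= 3' above). Writing
 * PCI_PM_CTRL_PME_STATUS writes 1 to the PME_Status bit, which clears it
 * (RW1C), and the 20 ms sleep allows for the PCI PM settle time on the
 * D3hot -> D0 transition.
 */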
10911
10912 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10913 {
10914         u16 rx_cons_sb;
10915
10916         /* Tell compiler that status block fields can change */
10917         barrier();
10918         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10919         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10920                 rx_cons_sb++;
10921         return (fp->rx_comp_cons != rx_cons_sb);
10922 }
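
/* The '& MAX_RCQ_DESC_CNT' test above skips the last entry of each RCQ
 * page: that slot holds the "next page" link element rather than a real
 * completion, so the consumer index is bumped past it before comparing.
 */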
10923
10924 /*
10925  * net_device service functions
10926  */
10927
10928 static int bnx2x_poll(struct napi_struct *napi, int budget)
10929 {
10930         int work_done = 0;
10931         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10932                                                  napi);
10933         struct bnx2x *bp = fp->bp;
10934
10935         while (1) {
10936 #ifdef BNX2X_STOP_ON_ERROR
10937                 if (unlikely(bp->panic)) {
10938                         napi_complete(napi);
10939                         return 0;
10940                 }
10941 #endif
10942
10943                 if (bnx2x_has_tx_work(fp))
10944                         bnx2x_tx_int(fp);
10945
10946                 if (bnx2x_has_rx_work(fp)) {
10947                         work_done += bnx2x_rx_int(fp, budget - work_done);
10948
10949                         /* must not complete if we consumed full budget */
10950                         if (work_done >= budget)
10951                                 break;
10952                 }
10953
10954                 /* Fall out from the NAPI loop if needed */
10955                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10956                         bnx2x_update_fpsb_idx(fp);
10957                         /* bnx2x_has_rx_work() reads the status block, thus we need
10958                          * to ensure that status block indices have actually been read
10959                          * (bnx2x_update_fpsb_idx) prior to this check
10960                          * (bnx2x_has_rx_work) so that we won't write the "newer"
10961                          * value of the status block to the IGU (if there was a DMA
10962                          * right after bnx2x_has_rx_work and there is no rmb, the
10963                          * memory read (bnx2x_update_fpsb_idx) may be postponed to
10964                          * right before bnx2x_ack_sb). In this case there will never
10965                          * be another interrupt until there is another update of the
10966                          * status block, while there is still unhandled work.
10967                          */
10968                         rmb();
10969
10970                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10971                                 napi_complete(napi);
10972                                 /* Re-enable interrupts */
10973                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10974                                              le16_to_cpu(fp->fp_c_idx),
10975                                              IGU_INT_NOP, 1);
10976                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10977                                              le16_to_cpu(fp->fp_u_idx),
10978                                              IGU_INT_ENABLE, 1);
10979                                 break;
10980                         }
10981                 }
10982         }
10983
10984         return work_done;
10985 }
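
/* Usage note: bnx2x_poll() is the NAPI poll callback; each fastpath's
 * napi struct is presumably registered elsewhere in the driver via
 * netif_napi_add(bp->dev, &fp->napi, bnx2x_poll, <weight>). Returning
 * work_done == budget keeps the queue scheduled; napi_complete() plus the
 * IGU acks re-enable the interrupt only once no Rx/Tx work remains.
 */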
10986
10987
10988 /* We split the first BD into header and data BDs
10989  * to ease the pain of our fellow microcode engineers;
10990  * we use one mapping for both BDs.
10991  * So far this has only been observed to happen
10992  * in Other Operating Systems(TM).
10993  */
10994 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10995                                    struct bnx2x_fastpath *fp,
10996                                    struct sw_tx_bd *tx_buf,
10997                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10998                                    u16 bd_prod, int nbd)
10999 {
11000         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11001         struct eth_tx_bd *d_tx_bd;
11002         dma_addr_t mapping;
11003         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11004
11005         /* first fix the first BD */
11006         h_tx_bd->nbd = cpu_to_le16(nbd);
11007         h_tx_bd->nbytes = cpu_to_le16(hlen);
11008
11009         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11010            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11011            h_tx_bd->addr_lo, h_tx_bd->nbd);
11012
11013         /* now get a new data BD
11014          * (after the pbd) and fill it */
11015         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11016         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11017
11018         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11019                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11020
11021         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11022         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11023         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11024
11025         /* this marks the BD as one that has no individual mapping */
11026         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11027
11028         DP(NETIF_MSG_TX_QUEUED,
11029            "TSO split data size is %d (%x:%x)\n",
11030            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11031
11032         /* update tx_bd */
11033         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11034
11035         return bd_prod;
11036 }
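
/* Example of the split above: for a TSO packet with headers hlen = 66 and
 * a 200-byte linear part mapped at 'mapping', the header BD keeps
 * (mapping, 66) while the new data BD gets (mapping + 66, 134). Only one
 * DMA mapping exists for both, which is why the buffer is flagged
 * BNX2X_TSO_SPLIT_BD so the unmap path treats it specially.
 */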
11037
11038 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11039 {
11040         if (fix > 0)
11041                 csum = (u16) ~csum_fold(csum_sub(csum,
11042                                 csum_partial(t_header - fix, fix, 0)));
11043
11044         else if (fix < 0)
11045                 csum = (u16) ~csum_fold(csum_add(csum,
11046                                 csum_partial(t_header, -fix, 0)));
11047
11048         return swab16(csum);
11049 }
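
/* Checksum fixup mechanics: for fix > 0 the ones-complement sum of the
 * 'fix' bytes just before t_header is subtracted from csum (csum_sub);
 * for fix < 0 the sum of the '-fix' bytes starting at t_header is added
 * (csum_add). Either way the result is folded to 16 bits, complemented
 * and byte-swapped into the form the parsing BD expects.
 */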
11050
11051 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11052 {
11053         u32 rc;
11054
11055         if (skb->ip_summed != CHECKSUM_PARTIAL)
11056                 rc = XMIT_PLAIN;
11057
11058         else {
11059                 if (skb->protocol == htons(ETH_P_IPV6)) {
11060                         rc = XMIT_CSUM_V6;
11061                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11062                                 rc |= XMIT_CSUM_TCP;
11063
11064                 } else {
11065                         rc = XMIT_CSUM_V4;
11066                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11067                                 rc |= XMIT_CSUM_TCP;
11068                 }
11069         }
11070
11071         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11072                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11073
11074         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11075                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11076
11077         return rc;
11078 }
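
/* Resulting flag combinations (straight from the logic above): a plain
 * skb yields XMIT_PLAIN; a CHECKSUM_PARTIAL TCP/IPv4 skb yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP; and a GSO TCPv4 skb additionally gets
 * XMIT_GSO_V4, since GSO implies the checksum flags as well.
 */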
11079
11080 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11081 /* Check if the packet requires linearization (packet is too fragmented);
11082    no need to check fragmentation if page size > 8K (there will be no
11083    violation of FW restrictions) */
11084 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11085                              u32 xmit_type)
11086 {
11087         int to_copy = 0;
11088         int hlen = 0;
11089         int first_bd_sz = 0;
11090
11091         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11092         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11093
11094                 if (xmit_type & XMIT_GSO) {
11095                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11096                         /* Check if LSO packet needs to be copied:
11097                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11098                         int wnd_size = MAX_FETCH_BD - 3;
11099                         /* Number of windows to check */
11100                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11101                         int wnd_idx = 0;
11102                         int frag_idx = 0;
11103                         u32 wnd_sum = 0;
11104
11105                         /* Headers length */
11106                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11107                                 tcp_hdrlen(skb);
11108
11109                         /* Amount of data (w/o headers) in the linear part of the SKB */
11110                         first_bd_sz = skb_headlen(skb) - hlen;
11111
11112                         wnd_sum  = first_bd_sz;
11113
11114                         /* Calculate the first sum - it's special */
11115                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11116                                 wnd_sum +=
11117                                         skb_shinfo(skb)->frags[frag_idx].size;
11118
11119                         /* If there was data in the linear part of the skb - check it */
11120                         if (first_bd_sz > 0) {
11121                                 if (unlikely(wnd_sum < lso_mss)) {
11122                                         to_copy = 1;
11123                                         goto exit_lbl;
11124                                 }
11125
11126                                 wnd_sum -= first_bd_sz;
11127                         }
11128
11129                         /* Others are easier: run through the frag list and
11130                            check all windows */
11131                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11132                                 wnd_sum +=
11133                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11134
11135                                 if (unlikely(wnd_sum < lso_mss)) {
11136                                         to_copy = 1;
11137                                         break;
11138                                 }
11139                                 wnd_sum -=
11140                                         skb_shinfo(skb)->frags[wnd_idx].size;
11141                         }
11142                 } else {
11143                         /* a non-LSO packet that is too fragmented
11144                            should always be linearized */
11145                         to_copy = 1;
11146                 }
11147         }
11148
11149 exit_lbl:
11150         if (unlikely(to_copy))
11151                 DP(NETIF_MSG_TX_QUEUED,
11152                    "Linearization IS REQUIRED for %s packet. "
11153                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11154                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11155                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11156
11157         return to_copy;
11158 }
11159 #endif
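
/* Numeric sketch of the window check above, assuming MAX_FETCH_BD is 13:
 * wnd_size = 10, so every run of 10 consecutive BDs (linear part plus
 * frags) must carry at least one full MSS of payload. E.g. with
 * gso_size = 1460 and ten consecutive 100-byte frags (1000 bytes < 1460),
 * wnd_sum < lso_mss triggers and the skb is linearized before transmit.
 */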
11160
11161 /* called with netif_tx_lock
11162  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11163  * netif_wake_queue()
11164  */
11165 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11166 {
11167         struct bnx2x *bp = netdev_priv(dev);
11168         struct bnx2x_fastpath *fp;
11169         struct netdev_queue *txq;
11170         struct sw_tx_bd *tx_buf;
11171         struct eth_tx_start_bd *tx_start_bd;
11172         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11173         struct eth_tx_parse_bd *pbd = NULL;
11174         u16 pkt_prod, bd_prod;
11175         int nbd, fp_index;
11176         dma_addr_t mapping;
11177         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11178         int i;
11179         u8 hlen = 0;
11180         __le16 pkt_size = 0;
11181
11182 #ifdef BNX2X_STOP_ON_ERROR
11183         if (unlikely(bp->panic))
11184                 return NETDEV_TX_BUSY;
11185 #endif
11186
11187         fp_index = skb_get_queue_mapping(skb);
11188         txq = netdev_get_tx_queue(dev, fp_index);
11189
11190         fp = &bp->fp[fp_index];
11191
11192         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11193                 fp->eth_q_stats.driver_xoff++;
11194                 netif_tx_stop_queue(txq);
11195                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11196                 return NETDEV_TX_BUSY;
11197         }
11198
11199         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11200            "  gso type %x  xmit_type %x\n",
11201            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11202            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11203
11204 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11205         /* First, check if we need to linearize the skb (due to FW
11206            restrictions). No need to check fragmentation if page size > 8K
11207            (there will be no violation of FW restrictions) */
11208         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11209                 /* Statistics of linearization */
11210                 bp->lin_cnt++;
11211                 if (skb_linearize(skb) != 0) {
11212                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11213                            "silently dropping this SKB\n");
11214                         dev_kfree_skb_any(skb);
11215                         return NETDEV_TX_OK;
11216                 }
11217         }
11218 #endif
11219
11220         /*
11221         Please read carefully. First we use one BD which we mark as start,
11222         then we have a parsing info BD (used for TSO or xsum),
11223         and only then we have the rest of the TSO BDs.
11224         (don't forget to mark the last one as last,
11225         and to unmap only AFTER you write to the BD ...)
11226         And above all, all PBD sizes are in words - NOT DWORDS!
11227         */
11228
11229         pkt_prod = fp->tx_pkt_prod++;
11230         bd_prod = TX_BD(fp->tx_bd_prod);
11231
11232         /* get a tx_buf and first BD */
11233         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11234         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11235
11236         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11237         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11238                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11239         /* header nbd */
11240         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11241
11242         /* remember the first BD of the packet */
11243         tx_buf->first_bd = fp->tx_bd_prod;
11244         tx_buf->skb = skb;
11245         tx_buf->flags = 0;
11246
11247         DP(NETIF_MSG_TX_QUEUED,
11248            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11249            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11250
11251 #ifdef BCM_VLAN
11252         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11253             (bp->flags & HW_VLAN_TX_FLAG)) {
11254                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11255                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11256         } else
11257 #endif
11258                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11259
11260         /* turn on parsing and get a BD */
11261         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11262         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11263
11264         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11265
11266         if (xmit_type & XMIT_CSUM) {
11267                 hlen = (skb_network_header(skb) - skb->data) / 2;
11268
11269                 /* for now NS flag is not used in Linux */
11270                 pbd->global_data =
11271                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11272                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11273
11274                 pbd->ip_hlen = (skb_transport_header(skb) -
11275                                 skb_network_header(skb)) / 2;
11276
11277                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11278
11279                 pbd->total_hlen = cpu_to_le16(hlen);
11280                 hlen = hlen*2;
11281
11282                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11283
11284                 if (xmit_type & XMIT_CSUM_V4)
11285                         tx_start_bd->bd_flags.as_bitfield |=
11286                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11287                 else
11288                         tx_start_bd->bd_flags.as_bitfield |=
11289                                                 ETH_TX_BD_FLAGS_IPV6;
11290
11291                 if (xmit_type & XMIT_CSUM_TCP) {
11292                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11293
11294                 } else {
11295                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11296
11297                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11298
11299                         DP(NETIF_MSG_TX_QUEUED,
11300                            "hlen %d  fix %d  csum before fix %x\n",
11301                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11302
11303                         /* HW bug: fixup the CSUM */
11304                         pbd->tcp_pseudo_csum =
11305                                 bnx2x_csum_fix(skb_transport_header(skb),
11306                                                SKB_CS(skb), fix);
11307
11308                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11309                            pbd->tcp_pseudo_csum);
11310                 }
11311         }
11312
11313         mapping = pci_map_single(bp->pdev, skb->data,
11314                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11315
11316         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11317         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11318         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11319         tx_start_bd->nbd = cpu_to_le16(nbd);
11320         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11321         pkt_size = tx_start_bd->nbytes;
11322
11323         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11324            "  nbytes %d  flags %x  vlan %x\n",
11325            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11326            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11327            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11328
11329         if (xmit_type & XMIT_GSO) {
11330
11331                 DP(NETIF_MSG_TX_QUEUED,
11332                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11333                    skb->len, hlen, skb_headlen(skb),
11334                    skb_shinfo(skb)->gso_size);
11335
11336                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11337
11338                 if (unlikely(skb_headlen(skb) > hlen))
11339                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11340                                                  hlen, bd_prod, ++nbd);
11341
11342                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11343                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11344                 pbd->tcp_flags = pbd_tcp_flags(skb);
11345
11346                 if (xmit_type & XMIT_GSO_V4) {
11347                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11348                         pbd->tcp_pseudo_csum =
11349                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11350                                                           ip_hdr(skb)->daddr,
11351                                                           0, IPPROTO_TCP, 0));
11352
11353                 } else
11354                         pbd->tcp_pseudo_csum =
11355                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11356                                                         &ipv6_hdr(skb)->daddr,
11357                                                         0, IPPROTO_TCP, 0));
11358
11359                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11360         }
11361         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11362
11363         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11364                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11365
11366                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11367                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11368                 if (total_pkt_bd == NULL)
11369                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11370
11371                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11372                                        frag->size, PCI_DMA_TODEVICE);
11373
11374                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11375                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11376                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11377                 le16_add_cpu(&pkt_size, frag->size);
11378
11379                 DP(NETIF_MSG_TX_QUEUED,
11380                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11381                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11382                    le16_to_cpu(tx_data_bd->nbytes));
11383         }
11384
11385         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11386
11387         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11388
11389         /* now send a tx doorbell, counting the next-page BD
11390          * if the packet contains it or ends on it
11391          */
11392         if (TX_BD_POFF(bd_prod) < nbd)
11393                 nbd++;
11394
11395         if (total_pkt_bd != NULL)
11396                 total_pkt_bd->total_pkt_bytes = pkt_size;
11397
11398         if (pbd)
11399                 DP(NETIF_MSG_TX_QUEUED,
11400                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11401                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11402                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11403                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11404                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11405
11406         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11407
11408         /*
11409          * Make sure that the BD data is updated before updating the producer
11410          * since FW might read the BD right after the producer is updated.
11411          * This is only applicable for weak-ordered memory model archs such
11412          * as IA-64. The following barrier is also mandatory since the FW
11413          * assumes that packets always have BDs.
11414          */
11415         wmb();
11416
11417         fp->tx_db.data.prod += nbd;
11418         barrier();
11419         DOORBELL(bp, fp->index, fp->tx_db.raw);
11420
11421         mmiowb();
11422
11423         fp->tx_bd_prod += nbd;
11424
11425         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11426                 netif_tx_stop_queue(txq);
11427                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11428                    if we put Tx into XOFF state. */
11429                 smp_mb();
11430                 fp->eth_q_stats.driver_xoff++;
11431                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11432                         netif_tx_wake_queue(txq);
11433         }
11434         fp->tx_pkt++;
11435
11436         return NETDEV_TX_OK;
11437 }
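
/* BD chain produced by bnx2x_start_xmit() for a typical TSO skb:
 * start BD -> parsing BD -> (optional split data BD for the headers) ->
 * one data BD per frag, with nbd covering all of them plus, when the
 * chain crosses or ends on a page boundary, the "next page" BD counted
 * by the TX_BD_POFF() check above.
 */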
11438
11439 /* called with rtnl_lock */
11440 static int bnx2x_open(struct net_device *dev)
11441 {
11442         struct bnx2x *bp = netdev_priv(dev);
11443
11444         netif_carrier_off(dev);
11445
11446         bnx2x_set_power_state(bp, PCI_D0);
11447
11448         return bnx2x_nic_load(bp, LOAD_OPEN);
11449 }
11450
11451 /* called with rtnl_lock */
11452 static int bnx2x_close(struct net_device *dev)
11453 {
11454         struct bnx2x *bp = netdev_priv(dev);
11455
11456         /* Unload the driver, release IRQs */
11457         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11458         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11459                 if (!CHIP_REV_IS_SLOW(bp))
11460                         bnx2x_set_power_state(bp, PCI_D3hot);
11461
11462         return 0;
11463 }
11464
11465 /* called with netif_tx_lock from dev_mcast.c */
11466 static void bnx2x_set_rx_mode(struct net_device *dev)
11467 {
11468         struct bnx2x *bp = netdev_priv(dev);
11469         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11470         int port = BP_PORT(bp);
11471
11472         if (bp->state != BNX2X_STATE_OPEN) {
11473                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11474                 return;
11475         }
11476
11477         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11478
11479         if (dev->flags & IFF_PROMISC)
11480                 rx_mode = BNX2X_RX_MODE_PROMISC;
11481
11482         else if ((dev->flags & IFF_ALLMULTI) ||
11483                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11484                   CHIP_IS_E1(bp)))
11485                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11486
11487         else { /* some multicasts */
11488                 if (CHIP_IS_E1(bp)) {
11489                         int i, old, offset;
11490                         struct dev_mc_list *mclist;
11491                         struct mac_configuration_cmd *config =
11492                                                 bnx2x_sp(bp, mcast_config);
11493
11494                         for (i = 0, mclist = dev->mc_list;
11495                              mclist && (i < netdev_mc_count(dev));
11496                              i++, mclist = mclist->next) {
11497
11498                                 config->config_table[i].
11499                                         cam_entry.msb_mac_addr =
11500                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11501                                 config->config_table[i].
11502                                         cam_entry.middle_mac_addr =
11503                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11504                                 config->config_table[i].
11505                                         cam_entry.lsb_mac_addr =
11506                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11507                                 config->config_table[i].cam_entry.flags =
11508                                                         cpu_to_le16(port);
11509                                 config->config_table[i].
11510                                         target_table_entry.flags = 0;
11511                                 config->config_table[i].target_table_entry.
11512                                         clients_bit_vector =
11513                                                 cpu_to_le32(1 << BP_L_ID(bp));
11514                                 config->config_table[i].
11515                                         target_table_entry.vlan_id = 0;
11516
11517                                 DP(NETIF_MSG_IFUP,
11518                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11519                                    config->config_table[i].
11520                                                 cam_entry.msb_mac_addr,
11521                                    config->config_table[i].
11522                                                 cam_entry.middle_mac_addr,
11523                                    config->config_table[i].
11524                                                 cam_entry.lsb_mac_addr);
11525                         }
11526                         old = config->hdr.length;
11527                         if (old > i) {
11528                                 for (; i < old; i++) {
11529                                         if (CAM_IS_INVALID(config->
11530                                                            config_table[i])) {
11531                                                 /* already invalidated */
11532                                                 break;
11533                                         }
11534                                         /* invalidate */
11535                                         CAM_INVALIDATE(config->
11536                                                        config_table[i]);
11537                                 }
11538                         }
11539
11540                         if (CHIP_REV_IS_SLOW(bp))
11541                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11542                         else
11543                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11544
11545                         config->hdr.length = i;
11546                         config->hdr.offset = offset;
11547                         config->hdr.client_id = bp->fp->cl_id;
11548                         config->hdr.reserved1 = 0;
11549
11550                         bp->set_mac_pending++;
11551                         smp_wmb();
11552
11553                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11554                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11555                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11556                                       0);
11557                 } else { /* E1H */
11558                         /* Accept one or more multicasts */
11559                         struct dev_mc_list *mclist;
11560                         u32 mc_filter[MC_HASH_SIZE];
11561                         u32 crc, bit, regidx;
11562                         int i;
11563
11564                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11565
11566                         for (i = 0, mclist = dev->mc_list;
11567                              mclist && (i < netdev_mc_count(dev));
11568                              i++, mclist = mclist->next) {
11569
11570                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11571                                    mclist->dmi_addr);
11572
11573                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11574                                 bit = (crc >> 24) & 0xff;
11575                                 regidx = bit >> 5;
11576                                 bit &= 0x1f;
11577                                 mc_filter[regidx] |= (1 << bit);
11578                         }
11579
11580                         for (i = 0; i < MC_HASH_SIZE; i++)
11581                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11582                                        mc_filter[i]);
11583                 }
11584         }
11585
11586         bp->rx_mode = rx_mode;
11587         bnx2x_set_storm_rx_mode(bp);
11588 }
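
/* E1H multicast hash example (per the loop above): the MAC address is
 * crc32c'd, bits 31:24 select one of 256 filter bits, regidx = bit >> 5
 * picks one of the MC_HASH_SIZE u32 registers and bit & 0x1f the bit
 * inside it. E.g. crc = 0xA7000000 gives bit 0xA7 = 167, so register 5,
 * bit 7.
 */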
11589
11590 /* called with rtnl_lock */
11591 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11592 {
11593         struct sockaddr *addr = p;
11594         struct bnx2x *bp = netdev_priv(dev);
11595
11596         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11597                 return -EINVAL;
11598
11599         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11600         if (netif_running(dev)) {
11601                 if (CHIP_IS_E1(bp))
11602                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11603                 else
11604                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11605         }
11606
11607         return 0;
11608 }
11609
11610 /* called with rtnl_lock */
11611 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11612                            int devad, u16 addr)
11613 {
11614         struct bnx2x *bp = netdev_priv(netdev);
11615         u16 value;
11616         int rc;
11617         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11618
11619         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11620            prtad, devad, addr);
11621
11622         if (prtad != bp->mdio.prtad) {
11623                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11624                    prtad, bp->mdio.prtad);
11625                 return -EINVAL;
11626         }
11627
11628         /* The HW expects different devad if CL22 is used */
11629         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11630
11631         bnx2x_acquire_phy_lock(bp);
11632         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11633                              devad, addr, &value);
11634         bnx2x_release_phy_lock(bp);
11635         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11636
11637         if (!rc)
11638                 rc = value;
11639         return rc;
11640 }
11641
11642 /* called with rtnl_lock */
11643 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11644                             u16 addr, u16 value)
11645 {
11646         struct bnx2x *bp = netdev_priv(netdev);
11647         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11648         int rc;
11649
11650         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11651                            " value 0x%x\n", prtad, devad, addr, value);
11652
11653         if (prtad != bp->mdio.prtad) {
11654                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11655                    prtad, bp->mdio.prtad);
11656                 return -EINVAL;
11657         }
11658
11659         /* The HW expects different devad if CL22 is used */
11660         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11661
11662         bnx2x_acquire_phy_lock(bp);
11663         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11664                               devad, addr, value);
11665         bnx2x_release_phy_lock(bp);
11666         return rc;
11667 }
11668
11669 /* called with rtnl_lock */
11670 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11671 {
11672         struct bnx2x *bp = netdev_priv(dev);
11673         struct mii_ioctl_data *mdio = if_mii(ifr);
11674
11675         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11676            mdio->phy_id, mdio->reg_num, mdio->val_in);
11677
11678         if (!netif_running(dev))
11679                 return -EAGAIN;
11680
11681         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11682 }
11683
11684 /* called with rtnl_lock */
11685 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11686 {
11687         struct bnx2x *bp = netdev_priv(dev);
11688         int rc = 0;
11689
11690         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11691             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11692                 return -EINVAL;
11693
11694         /* This does not race with packet allocation
11695          * because the actual alloc size is
11696          * only updated as part of load
11697          */
11698         dev->mtu = new_mtu;
11699
11700         if (netif_running(dev)) {
11701                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11702                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11703         }
11704
11705         return rc;
11706 }
11707
11708 static void bnx2x_tx_timeout(struct net_device *dev)
11709 {
11710         struct bnx2x *bp = netdev_priv(dev);
11711
11712 #ifdef BNX2X_STOP_ON_ERROR
11713         if (!bp->panic)
11714                 bnx2x_panic();
11715 #endif
11716         /* This allows the netif to be shut down gracefully before resetting */
11717         schedule_work(&bp->reset_task);
11718 }
11719
11720 #ifdef BCM_VLAN
11721 /* called with rtnl_lock */
11722 static void bnx2x_vlan_rx_register(struct net_device *dev,
11723                                    struct vlan_group *vlgrp)
11724 {
11725         struct bnx2x *bp = netdev_priv(dev);
11726
11727         bp->vlgrp = vlgrp;
11728
11729         /* Set flags according to the required capabilities */
11730         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11731
11732         if (dev->features & NETIF_F_HW_VLAN_TX)
11733                 bp->flags |= HW_VLAN_TX_FLAG;
11734
11735         if (dev->features & NETIF_F_HW_VLAN_RX)
11736                 bp->flags |= HW_VLAN_RX_FLAG;
11737
11738         if (netif_running(dev))
11739                 bnx2x_set_client_config(bp);
11740 }
11741
11742 #endif
11743
11744 #ifdef CONFIG_NET_POLL_CONTROLLER
11745 static void poll_bnx2x(struct net_device *dev)
11746 {
11747         struct bnx2x *bp = netdev_priv(dev);
11748
11749         disable_irq(bp->pdev->irq);
11750         bnx2x_interrupt(bp->pdev->irq, dev);
11751         enable_irq(bp->pdev->irq);
11752 }
11753 #endif
11754
11755 static const struct net_device_ops bnx2x_netdev_ops = {
11756         .ndo_open               = bnx2x_open,
11757         .ndo_stop               = bnx2x_close,
11758         .ndo_start_xmit         = bnx2x_start_xmit,
11759         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11760         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11761         .ndo_validate_addr      = eth_validate_addr,
11762         .ndo_do_ioctl           = bnx2x_ioctl,
11763         .ndo_change_mtu         = bnx2x_change_mtu,
11764         .ndo_tx_timeout         = bnx2x_tx_timeout,
11765 #ifdef BCM_VLAN
11766         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11767 #endif
11768 #ifdef CONFIG_NET_POLL_CONTROLLER
11769         .ndo_poll_controller    = poll_bnx2x,
11770 #endif
11771 };
11772
11773 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11774                                     struct net_device *dev)
11775 {
11776         struct bnx2x *bp;
11777         int rc;
11778
11779         SET_NETDEV_DEV(dev, &pdev->dev);
11780         bp = netdev_priv(dev);
11781
11782         bp->dev = dev;
11783         bp->pdev = pdev;
11784         bp->flags = 0;
11785         bp->func = PCI_FUNC(pdev->devfn);
11786
11787         rc = pci_enable_device(pdev);
11788         if (rc) {
11789                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11790                 goto err_out;
11791         }
11792
11793         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11794                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11795                        " aborting\n");
11796                 rc = -ENODEV;
11797                 goto err_out_disable;
11798         }
11799
11800         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11801                 printk(KERN_ERR PFX "Cannot find second PCI device"
11802                        " base address, aborting\n");
11803                 rc = -ENODEV;
11804                 goto err_out_disable;
11805         }
11806
11807         if (atomic_read(&pdev->enable_cnt) == 1) {
11808                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11809                 if (rc) {
11810                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11811                                " aborting\n");
11812                         goto err_out_disable;
11813                 }
11814
11815                 pci_set_master(pdev);
11816                 pci_save_state(pdev);
11817         }
11818
11819         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11820         if (bp->pm_cap == 0) {
11821                 printk(KERN_ERR PFX "Cannot find power management"
11822                        " capability, aborting\n");
11823                 rc = -EIO;
11824                 goto err_out_release;
11825         }
11826
11827         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11828         if (bp->pcie_cap == 0) {
11829                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11830                        " aborting\n");
11831                 rc = -EIO;
11832                 goto err_out_release;
11833         }
11834
11835         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11836                 bp->flags |= USING_DAC_FLAG;
11837                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11838                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11839                                " failed, aborting\n");
11840                         rc = -EIO;
11841                         goto err_out_release;
11842                 }
11843
11844         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11845                 printk(KERN_ERR PFX "System does not support DMA,"
11846                        " aborting\n");
11847                 rc = -EIO;
11848                 goto err_out_release;
11849         }
11850
11851         dev->mem_start = pci_resource_start(pdev, 0);
11852         dev->base_addr = dev->mem_start;
11853         dev->mem_end = pci_resource_end(pdev, 0);
11854
11855         dev->irq = pdev->irq;
11856
11857         bp->regview = pci_ioremap_bar(pdev, 0);
11858         if (!bp->regview) {
11859                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11860                 rc = -ENOMEM;
11861                 goto err_out_release;
11862         }
11863
11864         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11865                                         min_t(u64, BNX2X_DB_SIZE,
11866                                               pci_resource_len(pdev, 2)));
11867         if (!bp->doorbells) {
11868                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11869                 rc = -ENOMEM;
11870                 goto err_out_unmap;
11871         }
11872
11873         bnx2x_set_power_state(bp, PCI_D0);
11874
11875         /* clean indirect addresses */
11876         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11877                                PCICFG_VENDOR_ID_OFFSET);
11878         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11879         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11880         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11881         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11882
11883         dev->watchdog_timeo = TX_TIMEOUT;
11884
11885         dev->netdev_ops = &bnx2x_netdev_ops;
11886         dev->ethtool_ops = &bnx2x_ethtool_ops;
11887         dev->features |= NETIF_F_SG;
11888         dev->features |= NETIF_F_HW_CSUM;
11889         if (bp->flags & USING_DAC_FLAG)
11890                 dev->features |= NETIF_F_HIGHDMA;
11891         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11892         dev->features |= NETIF_F_TSO6;
11893 #ifdef BCM_VLAN
11894         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11895         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11896
11897         dev->vlan_features |= NETIF_F_SG;
11898         dev->vlan_features |= NETIF_F_HW_CSUM;
11899         if (bp->flags & USING_DAC_FLAG)
11900                 dev->vlan_features |= NETIF_F_HIGHDMA;
11901         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11902         dev->vlan_features |= NETIF_F_TSO6;
11903 #endif
11904
11905         /* get_port_hwinfo() will set prtad and mmds properly */
11906         bp->mdio.prtad = MDIO_PRTAD_NONE;
11907         bp->mdio.mmds = 0;
11908         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11909         bp->mdio.dev = dev;
11910         bp->mdio.mdio_read = bnx2x_mdio_read;
11911         bp->mdio.mdio_write = bnx2x_mdio_write;
11912
11913         return 0;
11914
11915 err_out_unmap:
11916         if (bp->regview) {
11917                 iounmap(bp->regview);
11918                 bp->regview = NULL;
11919         }
11920         if (bp->doorbells) {
11921                 iounmap(bp->doorbells);
11922                 bp->doorbells = NULL;
11923         }
11924
11925 err_out_release:
11926         if (atomic_read(&pdev->enable_cnt) == 1)
11927                 pci_release_regions(pdev);
11928
11929 err_out_disable:
11930         pci_disable_device(pdev);
11931         pci_set_drvdata(pdev, NULL);
11932
11933 err_out:
11934         return rc;
11935 }
11936
11937 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11938                                                  int *width, int *speed)
11939 {
11940         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11941
11942         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11943
11944         /* returned speed encoding: 1 = 2.5 GT/s, 2 = 5 GT/s */
11945         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11946 }
11947
11948 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11949 {
11950         const struct firmware *firmware = bp->firmware;
11951         struct bnx2x_fw_file_hdr *fw_hdr;
11952         struct bnx2x_fw_file_section *sections;
11953         u32 offset, len, num_ops;
11954         u16 *ops_offsets;
11955         int i;
11956         const u8 *fw_ver;
11957
11958         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11959                 return -EINVAL;
11960
11961         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11962         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11963
11964         /* Make sure none of the offsets and sizes make us read beyond
11965          * the end of the firmware data */
11966         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11967                 offset = be32_to_cpu(sections[i].offset);
11968                 len = be32_to_cpu(sections[i].len);
11969                 if (offset + len > firmware->size) {
11970                         printk(KERN_ERR PFX "Section %d length is out of "
11971                                             "bounds\n", i);
11972                         return -EINVAL;
11973                 }
11974         }
11975
11976         /* Likewise for the init_ops offsets */
11977         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11978         ops_offsets = (u16 *)(firmware->data + offset);
11979         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11980
11981         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11982                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11983                         printk(KERN_ERR PFX "Section offset %d is out of "
11984                                             "bounds\n", i);
11985                         return -EINVAL;
11986                 }
11987         }
11988
11989         /* Check FW version */
11990         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11991         fw_ver = firmware->data + offset;
11992         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11993             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11994             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11995             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11996                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11997                                     " Should be %d.%d.%d.%d\n",
11998                        fw_ver[0], fw_ver[1], fw_ver[2],
11999                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12000                        BCM_5710_FW_MINOR_VERSION,
12001                        BCM_5710_FW_REVISION_VERSION,
12002                        BCM_5710_FW_ENGINEERING_VERSION);
12003                 return -EINVAL;
12004         }
12005
12006         return 0;
12007 }
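
/* Validation note: the file header is itself an array of
 * bnx2x_fw_file_section entries, which is why 'sections' above aliases
 * fw_hdr. Every (offset, len) pair is bounds-checked against the blob
 * before any section is dereferenced, and the embedded version must match
 * the driver's compiled-in BCM_5710_FW_* numbers exactly.
 */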
12008
12009 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12010 {
12011         const __be32 *source = (const __be32 *)_source;
12012         u32 *target = (u32 *)_target;
12013         u32 i;
12014
12015         for (i = 0; i < n/4; i++)
12016                 target[i] = be32_to_cpu(source[i]);
12017 }
12018
12019 /*
12020    Ops array is stored in the following format:
12021    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12022  */
12023 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12024 {
12025         const __be32 *source = (const __be32 *)_source;
12026         struct raw_op *target = (struct raw_op *)_target;
12027         u32 i, j, tmp;
12028
12029         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12030                 tmp = be32_to_cpu(source[j]);
12031                 target[i].op = (tmp >> 24) & 0xff;
12032                 target[i].offset =  tmp & 0xffffff;
12033                 target[i].raw_data = be32_to_cpu(source[j+1]);
12034         }
12035 }
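
/* Example of the 8-byte op encoding above: the big-endian pair
 * 0x02ABCDEF 0x00000010 unpacks to op = 0x02, offset = 0xABCDEF and
 * raw_data = 0x10.
 */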
12036
12037 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12038 {
12039         const __be16 *source = (const __be16 *)_source;
12040         u16 *target = (u16 *)_target;
12041         u32 i;
12042
12043         for (i = 0; i < n/2; i++)
12044                 target[i] = be16_to_cpu(source[i]);
12045 }
12046
12047 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12048         do { \
12049                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12050                 bp->arr = kmalloc(len, GFP_KERNEL); \
12051                 if (!bp->arr) { \
12052                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12053                                             "for "#arr"\n", len); \
12054                         goto lbl; \
12055                 } \
12056                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12057                      (u8 *)bp->arr, len); \
12058         } while (0)
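
/* BNX2X_ALLOC_AND_SET(arr, lbl, func) expands against the local fw_hdr and
 * bp: it allocates bp->arr with the length taken from the fw header,
 * converts the big-endian section out of bp->firmware->data into it via
 * 'func', and jumps to 'lbl' on allocation failure - see the three
 * invocations below.
 */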

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
        const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;

        if (CHIP_IS_E1(bp))
                fw_file_name = FW_FILE_NAME_E1;
        else
                fw_file_name = FW_FILE_NAME_E1H;

        printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

        rc = request_firmware(&bp->firmware, fw_file_name, dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file %s\n",
                       fw_file_name);
                goto request_firmware_exit;
        }

        rc = bnx2x_check_firmware(bp);
        if (rc) {
                printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

        /* Initialize the pointers to the init arrays */
        /* Blob */
        BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

        /* Opcodes */
        BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

        /* Offsets */
        BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
                            be16_to_cpu_n);

        /* STORMs firmware */
        INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
        INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_pram_data.offset);
        INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_int_table_data.offset);
        INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_pram_data.offset);
        INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
        INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_pram_data.offset);
        INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_int_table_data.offset);
        INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_pram_data.offset);

        return 0;

init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}
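
/*
 * Note on the error labels above: each label undoes exactly the steps
 * that succeeded before the failing one, in reverse order.  A failure
 * at the init_ops_offsets stage falls through
 * init_offsets_alloc_err -> init_ops_alloc_err -> request_firmware_exit,
 * freeing init_ops, then init_data, then the firmware reference.
 */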


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev is zeroed by alloc_etherdev_mq() */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                printk(KERN_ERR PFX "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msglevel = debug;

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}
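
/*
 * Probe ordering, in brief: the BAR mappings come up first
 * (bnx2x_init_dev), then driver state (bnx2x_init_bp), then the firmware
 * arrays; register_netdev() runs last so that ndo callbacks can never
 * observe a half-initialized bp.  The init_one_exit path unwinds the PCI
 * side; the firmware arrays are freed later, in bnx2x_remove_one().
 */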

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}
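
/*
 * Suspend/resume are mirror images under rtnl_lock: suspend saves PCI
 * config space, detaches the netdev and unloads the NIC before dropping
 * to the chosen low-power state; resume restores config space, returns
 * to D0, reattaches and reloads.  If the interface is down, both paths
 * stop right after the PCI-state step.
 */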

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}
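
/*
 * The shmem_base sanity window above ([0xA0000, 0xC0000)) is where the
 * management CPU (MCP) is expected to publish its shared-memory region;
 * a value outside it means the MCP is not running, so the driver sets
 * NO_MCP_FLAG and skips the validity-map and mailbox-sequence reads.
 */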

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};
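
/*
 * EEH recovery flow, for reference: on a bus error the PCI core calls
 * .error_detected (detach, unload, ask for a reset), then after the slot
 * reset .slot_reset (re-enable the device and restore config space), and
 * finally .resume (re-read MCP shared memory and reload the NIC).
 */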

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

        printk(KERN_INFO "%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                printk(KERN_ERR PFX "Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}
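
/*
 * Ordering note: the slowpath workqueue is created before the PCI driver
 * registers (probe may queue work as soon as a device binds) and is
 * destroyed only after pci_unregister_driver() has torn down every
 * device, so no work item can outlive bnx2x_wq.
 */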

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}
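
/*
 * Credit scheme sketch (illustrative): cnic_spq_pending counts CNIC
 * entries currently outstanding on the hardware slowpath queue, with
 * cnic_kwq acting as a software ring buffered behind it.  Each
 * completion (count) returns credit, and the loop above drains the
 * software ring onto the SPQ until the max_kwqe_pending budget is used
 * up or the ring is empty; the cons pointer wraps from cnic_kwq_last
 * back to cnic_kwq.
 */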

static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}
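
/*
 * Note the return value: the number of kwqes actually accepted, which
 * may be less than count if the software ring fills (MAX_SP_DESC_CNT),
 * leaving the remainder for the caller to resubmit.  The trailing
 * bnx2x_cnic_sp_post(bp, 0) kicks the drain loop without returning any
 * completion credit.
 */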

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}
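
/*
 * Two variants of the same dispatch: bnx2x_cnic_ctl_send() takes
 * cnic_mutex and may sleep, while the _bh flavor only holds an RCU read
 * lock so it is safe from softirq context.  Both are protected against
 * unregistration: bnx2x_unregister_cnic() NULLs cnic_ops under the mutex
 * and then calls synchronize_rcu() before freeing anything.
 */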

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}
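
/*
 * IRQ hand-off to CNIC: under MSI-X, entry 1 of msix_table (the vector
 * after the default one) is lent to CNIC together with CNIC's own status
 * block, while irq_arr[1] additionally exposes the default status block.
 * In INTx/MSI mode no dedicated vector is handed over and the MSIX flag
 * bits are cleared instead.
 */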

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
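
/*
 * Expected usage, sketched from the cnic side (illustrative only): the
 * cnic module looks up this exported probe and then registers through
 * the returned ops table, roughly
 *
 *      struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *      if (cp && !cp->drv_register_cnic(netdev, my_ops, my_data))
 *              ... L5 offload path is live ...
 *      cp->drv_unregister_cnic(netdev);
 *
 * where my_ops/my_data stand in for the caller's cnic_ops table and its
 * opaque cookie (hypothetical names, not defined in this file).
 */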

#endif /* BCM_CNIC */
