/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.114-1"
#define DRV_MODULE_RELDATE      "2009/07/29"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
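
/*
 * Illustrative usage (an assumed typical invocation, not taken from this
 * file): these are ordinary module parameters, so e.g.
 *
 *   modprobe bnx2x multi_mode=1 num_rx_queues=4 num_tx_queues=4
 *
 * would load the driver with four Rx and four Tx queues instead of the
 * default of half the number of CPUs.
 */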

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
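/* Copy a buffer from host (PCI) memory to a device (GRC) address via the
 * DMAE block, busy-waiting on the write-back completion word.  Falls back
 * to indirect register writes while DMAE is not yet ready.
 */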
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
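
/* Inverse of bnx2x_write_dmae(): copy len32 dwords from a GRC address into
 * the slowpath wb_data buffer, polling the same completion word.  Falls
 * back to indirect register reads while DMAE is not yet ready.
 */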
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
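
/* Scan each storm processor's assert list and print any valid entries.
 * Returns the number of asserts found so the caller can tell whether the
 * firmware hit an internal error.
 */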
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
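
/* Dump the firmware (MCP) trace buffer from the scratchpad to the log,
 * starting at the current mark and wrapping around the buffer.
 */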
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}
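
/* Print a full driver state snapshot (indices, Rx/Tx rings, firmware
 * trace and storm asserts) when a fatal error is detected.
 */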
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}
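
/* Program the HC (host coalescing) block for the interrupt mode in use
 * (MSI-X, MSI or INTx) and set the attention leading/trailing edge masks.
 */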
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
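
/* Refresh the driver's cached fastpath status-block indices; the return
 * value has bit 0 set if the CSTORM index changed and bit 1 set if the
 * USTORM index changed.
 */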
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}
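
/* Read the pending interrupt bits from the HC SIMD mask register; the
 * read also serves as the interrupt acknowledge (hence the name).
 */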
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
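
/* Number of Tx BDs still available on the ring; the NUM_TX_RINGS
 * "next-page" BDs are counted as used so they are never handed out.
 */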
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
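
/* Reclaim completed Tx packets and restart the queue if it was stopped */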
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}

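/* Handle a slowpath (ramrod) completion CQE: advance the per-fastpath or
 * global state machine according to which command just completed.
 */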
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}
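
/* Allocate a page run for an SGE ring entry, map it for DMA and publish
 * its address in the SGE descriptor.
 */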
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}
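
/* Walk the SGL of an aggregation CQE, mark the consumed SGE pages in the
 * mask and advance rx_sge_prod over fully consumed mask elements.
 */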
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
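
/* Begin a TPA (LRO) aggregation: park the partially-received skb in the
 * per-queue TPA pool and put a fresh skb from the pool onto the producer BD.
 */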
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}
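
/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments; on allocation failure the caller drops the whole packet.
 */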
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }
                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
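
/* End a TPA aggregation: unmap the pooled skb, fix up the IP checksum,
 * attach the page fragments and hand the packet to the stack, replacing
 * the pool entry with a freshly allocated skb.
 */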
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
1440          * as IA-64. The following barrier is also mandatory since FW will
1441          * as IA-64. The following barrier is also mandatory since the FW
1442          * assumes that posted BDs always have buffers attached.
1443         wmb();
1444
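        /* Copy the producers block into the USTORM internal memory for this
         * client, one 32-bit word at a time; the FW reads the new producers
         * from there.
         */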
1445         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446                 REG_WR(bp, BAR_USTRORM_INTMEM +
1447                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1448                        ((u32 *)&rx_prods)[i]);
1449
1450         mmiowb(); /* keep prod updates ordered */
1451
1452         DP(NETIF_MSG_RX_STATUS,
1453            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1454            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1455 }
1456
1457 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458 {
1459         struct bnx2x *bp = fp->bp;
1460         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1461         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462         int rx_pkt = 0;
1463
1464 #ifdef BNX2X_STOP_ON_ERROR
1465         if (unlikely(bp->panic))
1466                 return 0;
1467 #endif
1468
1469         /* The CQ "next element" has the same size as a regular element,
1470            so simply stepping the consumer past it is OK here */
1471         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473                 hw_comp_cons++;
1474
1475         bd_cons = fp->rx_bd_cons;
1476         bd_prod = fp->rx_bd_prod;
1477         bd_prod_fw = bd_prod;
1478         sw_comp_cons = fp->rx_comp_cons;
1479         sw_comp_prod = fp->rx_comp_prod;
1480
1481         /* Memory barrier necessary as speculative reads of the rx
1482          * buffer can be ahead of the index in the status block
1483          */
1484         rmb();
1485
1486         DP(NETIF_MSG_RX_STATUS,
1487            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1488            fp->index, hw_comp_cons, sw_comp_cons);
1489
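        /* Each completion is either a slowpath event, a TPA start/end
         * marker or a regular Rx packet; the loop dispatches on the CQE
         * type until the budget is exhausted or the ring is empty.
         */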
1490         while (sw_comp_cons != hw_comp_cons) {
1491                 struct sw_rx_bd *rx_buf = NULL;
1492                 struct sk_buff *skb;
1493                 union eth_rx_cqe *cqe;
1494                 u8 cqe_fp_flags;
1495                 u16 len, pad;
1496
1497                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498                 bd_prod = RX_BD(bd_prod);
1499                 bd_cons = RX_BD(bd_cons);
1500
1501                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1502                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1503
1504                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1505                    "  rss_hash %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1506                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1507                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1508                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1510
1511                 /* is this a slowpath msg? */
1512                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1513                         bnx2x_sp_event(fp, cqe);
1514                         goto next_cqe;
1515
1516                 /* this is an rx packet */
1517                 } else {
1518                         rx_buf = &fp->rx_buf_ring[bd_cons];
1519                         skb = rx_buf->skb;
1520                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521                         pad = cqe->fast_path_cqe.placement_offset;
1522
1523                         /* If the CQE is marked as both TPA_START and
1524                            TPA_END, it is a non-TPA CQE */
1525                         if ((!fp->disable_tpa) &&
1526                             (TPA_TYPE(cqe_fp_flags) !=
1527                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1528                                 u16 queue = cqe->fast_path_cqe.queue_index;
1529
1530                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531                                         DP(NETIF_MSG_RX_STATUS,
1532                                            "calling tpa_start on queue %d\n",
1533                                            queue);
1534
1535                                         bnx2x_tpa_start(fp, queue, skb,
1536                                                         bd_cons, bd_prod);
1537                                         goto next_rx;
1538                                 }
1539
1540                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541                                         DP(NETIF_MSG_RX_STATUS,
1542                                            "calling tpa_stop on queue %d\n",
1543                                            queue);
1544
1545                                         if (!BNX2X_RX_SUM_FIX(cqe))
1546                                                 BNX2X_ERR("STOP on non-TCP "
1547                                                           "data\n");
1548
1549                                         /* This is the size of the linear
1550                                            data on this skb */
1551                                         len = le16_to_cpu(cqe->fast_path_cqe.
1552                                                                 len_on_bd);
1553                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1554                                                     len, cqe, comp_ring_cons);
1555 #ifdef BNX2X_STOP_ON_ERROR
1556                                         if (bp->panic)
1557                                                 return 0;
1558 #endif
1559
1560                                         bnx2x_update_sge_prod(fp,
1561                                                         &cqe->fast_path_cqe);
1562                                         goto next_cqe;
1563                                 }
1564                         }
1565
1566                         pci_dma_sync_single_for_device(bp->pdev,
1567                                         pci_unmap_addr(rx_buf, mapping),
1568                                         pad + RX_COPY_THRESH,
1569                                         PCI_DMA_FROMDEVICE);
1570                         prefetch(skb);
1571                         prefetch(((char *)(skb)) + 128);
1572
1573                         /* is this an error packet? (ETH_RX_ERROR_FALGS spelling matches its definition) */
1574                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1575                                 DP(NETIF_MSG_RX_ERR,
1576                                    "ERROR  flags %x  rx packet %u\n",
1577                                    cqe_fp_flags, sw_comp_cons);
1578                                 fp->eth_q_stats.rx_err_discard_pkt++;
1579                                 goto reuse_rx;
1580                         }
1581
1582                         /* Since we don't have a jumbo ring, copy small
1583                          * packets into a new skb if mtu > 1500
1584                          */
1585                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586                             (len <= RX_COPY_THRESH)) {
1587                                 struct sk_buff *new_skb;
1588
1589                                 new_skb = netdev_alloc_skb(bp->dev,
1590                                                            len + pad);
1591                                 if (new_skb == NULL) {
1592                                         DP(NETIF_MSG_RX_ERR,
1593                                            "ERROR  packet dropped "
1594                                            "because of alloc failure\n");
1595                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1596                                         goto reuse_rx;
1597                                 }
1598
1599                                 /* aligned copy */
1600                                 skb_copy_from_linear_data_offset(skb, pad,
1601                                                     new_skb->data + pad, len);
1602                                 skb_reserve(new_skb, pad);
1603                                 skb_put(new_skb, len);
1604
1605                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607                                 skb = new_skb;
1608
1609                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610                                 pci_unmap_single(bp->pdev,
1611                                         pci_unmap_addr(rx_buf, mapping),
1612                                         bp->rx_buf_size,
1613                                         PCI_DMA_FROMDEVICE);
1614                                 skb_reserve(skb, pad);
1615                                 skb_put(skb, len);
1616
1617                         } else {
1618                                 DP(NETIF_MSG_RX_ERR,
1619                                    "ERROR  packet dropped because "
1620                                    "of alloc failure\n");
1621                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1622 reuse_rx:
1623                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624                                 goto next_rx;
1625                         }
1626
1627                         skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629                         skb->ip_summed = CHECKSUM_NONE;
1630                         if (bp->rx_csum) {
1631                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1633                                 else
1634                                         fp->eth_q_stats.hw_csum_err++;
1635                         }
1636                 }
1637
1638                 skb_record_rx_queue(skb, fp->index);
1639 #ifdef BCM_VLAN
1640                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1641                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642                      PARSING_FLAGS_VLAN))
1643                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645                 else
1646 #endif
1647                         netif_receive_skb(skb);
1648
1649
1650 next_rx:
1651                 rx_buf->skb = NULL;
1652
1653                 bd_cons = NEXT_RX_IDX(bd_cons);
1654                 bd_prod = NEXT_RX_IDX(bd_prod);
1655                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656                 rx_pkt++;
1657 next_cqe:
1658                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1660
1661                 if (rx_pkt == budget)
1662                         break;
1663         } /* while */
1664
1665         fp->rx_bd_cons = bd_cons;
1666         fp->rx_bd_prod = bd_prod_fw;
1667         fp->rx_comp_cons = sw_comp_cons;
1668         fp->rx_comp_prod = sw_comp_prod;
1669
1670         /* Update producers */
1671         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672                              fp->rx_sge_prod);
1673
1674         fp->rx_pkt += rx_pkt;
1675         fp->rx_calls++;
1676
1677         return rx_pkt;
1678 }
1679
1680 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681 {
1682         struct bnx2x_fastpath *fp = fp_cookie;
1683         struct bnx2x *bp = fp->bp;
1684
1685         /* Return here if interrupt is disabled */
1686         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688                 return IRQ_HANDLED;
1689         }
1690
1691         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1692            fp->index, fp->sb_id);
1693         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1694
1695 #ifdef BNX2X_STOP_ON_ERROR
1696         if (unlikely(bp->panic))
1697                 return IRQ_HANDLED;
1698 #endif
1699         /* Handle Rx or Tx according to MSI-X vector */
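        /* Rx work is deferred to the NAPI poll loop; Tx completions are
         * handled right here in hard-IRQ context.
         */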
1700         if (fp->is_rx_queue) {
1701                 prefetch(fp->rx_cons_sb);
1702                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1703
1704                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1705
1706         } else {
1707                 prefetch(fp->tx_cons_sb);
1708                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710                 bnx2x_update_fpsb_idx(fp);
1711                 rmb();
1712                 bnx2x_tx_int(fp);
1713
1714                 /* Re-enable interrupts */
1715                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719         }
1720
1721         return IRQ_HANDLED;
1722 }
1723
1724 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725 {
1726         struct bnx2x *bp = netdev_priv(dev_instance);
1727         u16 status = bnx2x_ack_int(bp);
1728         u16 mask;
1729         int i;
1730
1731         /* Return here if interrupt is shared and it's not for us */
1732         if (unlikely(status == 0)) {
1733                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734                 return IRQ_NONE;
1735         }
1736         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1737
1738         /* Return here if interrupt is disabled */
1739         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741                 return IRQ_HANDLED;
1742         }
1743
1744 #ifdef BNX2X_STOP_ON_ERROR
1745         if (unlikely(bp->panic))
1746                 return IRQ_HANDLED;
1747 #endif
1748
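        /* Status word layout: bit 0 signals the slowpath (default status
         * block); bit (1 + sb_id) signals fastpath status block sb_id,
         * hence the (0x2 << sb_id) mask below.
         */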
1749         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750                 struct bnx2x_fastpath *fp = &bp->fp[i];
1751
1752                 mask = 0x2 << fp->sb_id;
1753                 if (status & mask) {
1754                         /* Handle Rx or Tx according to SB id */
1755                         if (fp->is_rx_queue) {
1756                                 prefetch(fp->rx_cons_sb);
1757                                 prefetch(&fp->status_blk->u_status_block.
1758                                                         status_block_index);
1759
1760                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1761
1762                         } else {
1763                                 prefetch(fp->tx_cons_sb);
1764                                 prefetch(&fp->status_blk->c_status_block.
1765                                                         status_block_index);
1766
1767                                 bnx2x_update_fpsb_idx(fp);
1768                                 rmb();
1769                                 bnx2x_tx_int(fp);
1770
1771                                 /* Re-enable interrupts */
1772                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773                                              le16_to_cpu(fp->fp_u_idx),
1774                                              IGU_INT_NOP, 1);
1775                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776                                              le16_to_cpu(fp->fp_c_idx),
1777                                              IGU_INT_ENABLE, 1);
1778                         }
1779                         status &= ~mask;
1780                 }
1781         }
1782
1783
1784         if (unlikely(status & 0x1)) {
1785                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1786
1787                 status &= ~0x1;
1788                 if (!status)
1789                         return IRQ_HANDLED;
1790         }
1791
1792         if (status)
1793                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794                    status);
1795
1796         return IRQ_HANDLED;
1797 }
1798
1799 /* end of fast path */
1800
1801 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1802
1803 /* Link */
1804
1805 /*
1806  * General service functions
1807  */
1808
1809 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1810 {
1811         u32 lock_status;
1812         u32 resource_bit = (1 << resource);
1813         int func = BP_FUNC(bp);
1814         u32 hw_lock_control_reg;
1815         int cnt;
1816
1817         /* Validating that the resource is within range */
1818         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819                 DP(NETIF_MSG_HW,
1820                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822                 return -EINVAL;
1823         }
1824
1825         if (func <= 5) {
1826                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827         } else {
1828                 hw_lock_control_reg =
1829                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830         }
1831
1832         /* Validating that the resource is not already taken */
1833         lock_status = REG_RD(bp, hw_lock_control_reg);
1834         if (lock_status & resource_bit) {
1835                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1836                    lock_status, resource_bit);
1837                 return -EEXIST;
1838         }
1839
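        /* The lock is taken by writing the resource bit to the "set" half
         * of the control register (offset +4) and confirmed by reading the
         * bit back from the base register.
         */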
1840         /* Try every 5ms for up to 5 seconds */
1841         for (cnt = 0; cnt < 1000; cnt++) {
1842                 /* Try to acquire the lock */
1843                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844                 lock_status = REG_RD(bp, hw_lock_control_reg);
1845                 if (lock_status & resource_bit)
1846                         return 0;
1847
1848                 msleep(5);
1849         }
1850         DP(NETIF_MSG_HW, "Timeout\n");
1851         return -EAGAIN;
1852 }
1853
1854 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1855 {
1856         u32 lock_status;
1857         u32 resource_bit = (1 << resource);
1858         int func = BP_FUNC(bp);
1859         u32 hw_lock_control_reg;
1860
1861         /* Validating that the resource is within range */
1862         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863                 DP(NETIF_MSG_HW,
1864                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866                 return -EINVAL;
1867         }
1868
1869         if (func <= 5) {
1870                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871         } else {
1872                 hw_lock_control_reg =
1873                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874         }
1875
1876         /* Validating that the resource is currently taken */
1877         lock_status = REG_RD(bp, hw_lock_control_reg);
1878         if (!(lock_status & resource_bit)) {
1879                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1880                    lock_status, resource_bit);
1881                 return -EFAULT;
1882         }
1883
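        /* Writing the resource bit to the base register releases the lock
         * (the acquire path writes to offset +4 to take it).
         */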
1884         REG_WR(bp, hw_lock_control_reg, resource_bit);
1885         return 0;
1886 }
1887
1888 /* HW Lock for shared dual port PHYs */
1889 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1890 {
1891         mutex_lock(&bp->port.phy_mutex);
1892
1893         if (bp->port.need_hw_lock)
1894                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1895 }
1896
1897 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1898 {
1899         if (bp->port.need_hw_lock)
1900                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1901
1902         mutex_unlock(&bp->port.phy_mutex);
1903 }
1904
1905 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906 {
1907         /* The GPIO should be swapped if swap register is set and active */
1908         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910         int gpio_shift = gpio_num +
1911                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912         u32 gpio_mask = (1 << gpio_shift);
1913         u32 gpio_reg;
1914         int value;
1915
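        /* The two ports' pins occupy distinct bit ranges within the GPIO
         * register; gpio_shift picks the bit for this pin on this port.
         */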
1916         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918                 return -EINVAL;
1919         }
1920
1921         /* read GPIO value */
1922         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924         /* get the requested pin value */
1925         if ((gpio_reg & gpio_mask) == gpio_mask)
1926                 value = 1;
1927         else
1928                 value = 0;
1929
1930         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1931
1932         return value;
1933 }
1934
1935 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944
1945         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947                 return -EINVAL;
1948         }
1949
1950         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1951         /* read GPIO and mask out all but the float bits */
1952         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1953
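        /* Each pin is driven through write-1 SET/CLR/FLOAT bit fields;
         * the float bits preserved above track the current tri-state
         * configuration.
         */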
1954         switch (mode) {
1955         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957                    gpio_num, gpio_shift);
1958                 /* clear FLOAT and set CLR */
1959                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961                 break;
1962
1963         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965                    gpio_num, gpio_shift);
1966                 /* clear FLOAT and set SET */
1967                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969                 break;
1970
1971         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1972                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973                    gpio_num, gpio_shift);
1974                 /* set FLOAT */
1975                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976                 break;
1977
1978         default:
1979                 break;
1980         }
1981
1982         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1983         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1984
1985         return 0;
1986 }
1987
1988 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read the GPIO interrupt register */
2005         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010                                    "output low\n", gpio_num, gpio_shift);
2011                 /* clear SET and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018                                    "output high\n", gpio_num, gpio_shift);
2019                 /* clear CLR and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022                 break;
2023
2024         default:
2025                 break;
2026         }
2027
2028         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031         return 0;
2032 }
2033
2034 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2035 {
2036         u32 spio_mask = (1 << spio_num);
2037         u32 spio_reg;
2038
2039         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040             (spio_num > MISC_REGISTERS_SPIO_7)) {
2041                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2046         /* read SPIO and mask out all but the float bits */
2047         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2051                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052                 /* clear FLOAT and set CLR */
2053                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055                 break;
2056
2057         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2058                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059                 /* clear FLOAT and set SET */
2060                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062                 break;
2063
2064         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066                 /* set FLOAT */
2067                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076
2077         return 0;
2078 }
2079
2080 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2081 {
2082         switch (bp->link_vars.ieee_fc &
2083                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2084         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2085                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2086                                           ADVERTISED_Pause);
2087                 break;
2088
2089         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2090                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2091                                          ADVERTISED_Pause);
2092                 break;
2093
2094         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2095                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2096                 break;
2097
2098         default:
2099                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2100                                           ADVERTISED_Pause);
2101                 break;
2102         }
2103 }
2104
2105 static void bnx2x_link_report(struct bnx2x *bp)
2106 {
2107         if (bp->state == BNX2X_STATE_DISABLED) {
2108                 netif_carrier_off(bp->dev);
2109                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110                 return;
2111         }
2112
2113         if (bp->link_vars.link_up) {
2114                 if (bp->state == BNX2X_STATE_OPEN)
2115                         netif_carrier_on(bp->dev);
2116                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2117
2118                 printk("%d Mbps ", bp->link_vars.line_speed);
2119
2120                 if (bp->link_vars.duplex == DUPLEX_FULL)
2121                         printk("full duplex");
2122                 else
2123                         printk("half duplex");
2124
2125                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2127                                 printk(", receive ");
2128                                 if (bp->link_vars.flow_ctrl &
2129                                     BNX2X_FLOW_CTRL_TX)
2130                                         printk("& transmit ");
2131                         } else {
2132                                 printk(", transmit ");
2133                         }
2134                         printk("flow control ON");
2135                 }
2136                 printk("\n");
2137
2138         } else { /* link_down */
2139                 netif_carrier_off(bp->dev);
2140                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2141         }
2142 }
2143
2144 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2145 {
2146         if (!BP_NOMCP(bp)) {
2147                 u8 rc;
2148
2149                 /* Initialize link parameters structure variables */
2150                 /* It is recommended to turn off RX FC for jumbo frames
2151                    for better performance */
2152                 if (IS_E1HMF(bp))
2153                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2154                 else if (bp->dev->mtu > 5000)
2155                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2156                 else
2157                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2158
2159                 bnx2x_acquire_phy_lock(bp);
2160
2161                 if (load_mode == LOAD_DIAG)
2162                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2163
2164                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2165
2166                 bnx2x_release_phy_lock(bp);
2167
2168                 bnx2x_calc_fc_adv(bp);
2169
2170                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2171                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2172                         bnx2x_link_report(bp);
2173                 }
2174
2175                 return rc;
2176         }
2177         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2178         return -EINVAL;
2179 }
2180
2181 static void bnx2x_link_set(struct bnx2x *bp)
2182 {
2183         if (!BP_NOMCP(bp)) {
2184                 bnx2x_acquire_phy_lock(bp);
2185                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2186                 bnx2x_release_phy_lock(bp);
2187
2188                 bnx2x_calc_fc_adv(bp);
2189         } else
2190                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2191 }
2192
2193 static void bnx2x__link_reset(struct bnx2x *bp)
2194 {
2195         if (!BP_NOMCP(bp)) {
2196                 bnx2x_acquire_phy_lock(bp);
2197                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2198                 bnx2x_release_phy_lock(bp);
2199         } else
2200                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2201 }
2202
2203 static u8 bnx2x_link_test(struct bnx2x *bp)
2204 {
2205         u8 rc;
2206
2207         bnx2x_acquire_phy_lock(bp);
2208         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2209         bnx2x_release_phy_lock(bp);
2210
2211         return rc;
2212 }
2213
2214 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2215 {
2216         u32 r_param = bp->link_vars.line_speed / 8;
2217         u32 fair_periodic_timeout_usec;
2218         u32 t_fair;
2219
2220         memset(&(bp->cmng.rs_vars), 0,
2221                sizeof(struct rate_shaping_vars_per_port));
2222         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2223
2224         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2225         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2226
2227         /* this is the threshold below which no timer arming will occur.
2228            The 1.25 coefficient makes the threshold a little bigger than
2229            the real time, to compensate for timer inaccuracy */
2230         bp->cmng.rs_vars.rs_threshold =
2231                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
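        /* e.g. with the 100 usec period above and a 10G link:
         * r_param = 10000/8 = 1250 bytes/usec, so the threshold is
         * 100 * 1250 * 5/4 = 156250 bytes
         */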
2232
2233         /* resolution of fairness timer */
2234         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2235         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2236         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2237
2238         /* this is the threshold below which we won't arm the timer anymore */
2239         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2240
2241         /* we multiply by 1e3/8 to get bytes/msec.
2242            We don't want the credits to exceed
2243            t_fair*FAIR_MEM (the algorithm resolution) */
2244         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2245         /* since each tick is 4 usec */
2246         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2247 }
2248
2249 /* Calculates the sum of vn_min_rates.
2250    It's needed for further normalizing of the min_rates.
2251    Stores the result in bp->vn_weight_sum:
2252      the sum of vn_min_rates,
2253        or
2254      0 - if all the min_rates are 0.
2255      In the latter case the fairness algorithm should be deactivated.
2256      If not all min_rates are zero then those that are zero will be set to 1.
2257  */
2258 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2259 {
2260         int all_zero = 1;
2261         int port = BP_PORT(bp);
2262         int vn;
2263
2264         bp->vn_weight_sum = 0;
2265         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2266                 int func = 2*vn + port;
2267                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2268                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2269                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2270
2271                 /* Skip hidden vns */
2272                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2273                         continue;
2274
2275                 /* If min rate is zero - set it to 1 */
2276                 if (!vn_min_rate)
2277                         vn_min_rate = DEF_MIN_RATE;
2278                 else
2279                         all_zero = 0;
2280
2281                 bp->vn_weight_sum += vn_min_rate;
2282         }
2283
2284         /* ... disable fairness only if all min rates are zero */
2285         if (all_zero)
2286                 bp->vn_weight_sum = 0;
2287 }
2288
2289 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2290 {
2291         struct rate_shaping_vars_per_vn m_rs_vn;
2292         struct fairness_vars_per_vn m_fair_vn;
2293         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2294         u16 vn_min_rate, vn_max_rate;
2295         int i;
2296
2297         /* If the function is hidden - set min and max to zero */
2298         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2299                 vn_min_rate = 0;
2300                 vn_max_rate = 0;
2301
2302         } else {
2303                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2304                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2305                 /* If fairness is enabled (not all min rates are zero) and
2306                    the current min rate is zero - set it to 1.
2307                    This is a requirement of the algorithm. */
2308                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2309                         vn_min_rate = DEF_MIN_RATE;
2310                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2311                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2312         }
2313
2314         DP(NETIF_MSG_IFUP,
2315            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2316            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2317
2318         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2319         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2320
2321         /* global vn counter - maximal Mbps for this vn */
2322         m_rs_vn.vn_counter.rate = vn_max_rate;
2323
2324         /* quota - number of bytes transmitted in this period */
2325         m_rs_vn.vn_counter.quota =
2326                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
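        /* e.g. a vn capped at 10000 Mbps with a 100 usec period gets
         * 10000 * 100 / 8 = 125000 bytes of quota per period
         */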
2327
2328         if (bp->vn_weight_sum) {
2329                 /* credit for each period of the fairness algorithm:
2330                    number of bytes in T_FAIR (the vns share the port rate).
2331                    vn_weight_sum should not be larger than 10000, thus
2332                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2333                    than zero */
2334                 m_fair_vn.vn_credit_delta =
2335                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2336                                                  (8 * bp->vn_weight_sum))),
2337                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2338                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2339                    m_fair_vn.vn_credit_delta);
2340         }
2341
2342         /* Store it to internal memory */
2343         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2344                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2345                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2346                        ((u32 *)(&m_rs_vn))[i]);
2347
2348         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2349                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2351                        ((u32 *)(&m_fair_vn))[i]);
2352 }
2353
2354
2355 /* This function is called upon link interrupt */
2356 static void bnx2x_link_attn(struct bnx2x *bp)
2357 {
2358         /* Make sure that we are synced with the current statistics */
2359         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2360
2361         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2362
2363         if (bp->link_vars.link_up) {
2364
2365                 /* dropless flow control */
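                /* Tell the FW, via USTORM internal memory, whether Tx
                 * pause is active so it can hold off the peer instead of
                 * dropping frames when Rx buffers run low.
                 */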
2366                 if (CHIP_IS_E1H(bp)) {
2367                         int port = BP_PORT(bp);
2368                         u32 pause_enabled = 0;
2369
2370                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2371                                 pause_enabled = 1;
2372
2373                         REG_WR(bp, BAR_USTRORM_INTMEM +
2374                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2375                                pause_enabled);
2376                 }
2377
2378                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2379                         struct host_port_stats *pstats;
2380
2381                         pstats = bnx2x_sp(bp, port_stats);
2382                         /* reset old bmac stats */
2383                         memset(&(pstats->mac_stx[0]), 0,
2384                                sizeof(struct mac_stx));
2385                 }
2386                 if ((bp->state == BNX2X_STATE_OPEN) ||
2387                     (bp->state == BNX2X_STATE_DISABLED))
2388                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2389         }
2390
2391         /* indicate link status */
2392         bnx2x_link_report(bp);
2393
2394         if (IS_E1HMF(bp)) {
2395                 int port = BP_PORT(bp);
2396                 int func;
2397                 int vn;
2398
2399                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2400                         if (vn == BP_E1HVN(bp))
2401                                 continue;
2402
2403                         func = ((vn << 1) | port);
2404
2405                         /* Set the attention towards other drivers
2406                            on the same port */
2407                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2408                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2409                 }
2410
2411                 if (bp->link_vars.link_up) {
2412                         int i;
2413
2414                         /* Init rate shaping and fairness contexts */
2415                         bnx2x_init_port_minmax(bp);
2416
2417                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2418                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2419
2420                         /* Store it to internal memory */
2421                         for (i = 0;
2422                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2423                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2424                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2425                                        ((u32 *)(&bp->cmng))[i]);
2426                 }
2427         }
2428 }
2429
2430 static void bnx2x__link_status_update(struct bnx2x *bp)
2431 {
2432         int func = BP_FUNC(bp);
2433
2434         if (bp->state != BNX2X_STATE_OPEN)
2435                 return;
2436
2437         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2438
2439         if (bp->link_vars.link_up)
2440                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2441         else
2442                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2443
2444         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2445         bnx2x_calc_vn_weight_sum(bp);
2446
2447         /* indicate link status */
2448         bnx2x_link_report(bp);
2449 }
2450
2451 static void bnx2x_pmf_update(struct bnx2x *bp)
2452 {
2453         int port = BP_PORT(bp);
2454         u32 val;
2455
2456         bp->port.pmf = 1;
2457         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2458
2459         /* enable nig attention */
2460         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2461         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2462         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2463
2464         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2465 }
2466
2467 /* end of Link */
2468
2469 /* slow path */
2470
2471 /*
2472  * General service functions
2473  */
2474
2475 /* send the MCP a request, block until there is a reply */
2476 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2477 {
2478         int func = BP_FUNC(bp);
2479         u32 seq = ++bp->fw_seq;
2480         u32 rc = 0;
2481         u32 cnt = 1;
2482         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2483
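        /* Mailbox handshake: post the command with a fresh sequence number,
         * then poll until the FW echoes that sequence in its half of the
         * mailbox.
         */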
2484         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2485         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2486
2487         do {
2488                 /* let the FW do its magic ... */
2489                 msleep(delay);
2490
2491                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2492
2493                 /* Give the FW up to 2 seconds (200 * 10ms) */
2494         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2495
2496         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2497            cnt*delay, rc, seq);
2498
2499         /* is this a reply to our command? */
2500         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2501                 rc &= FW_MSG_CODE_MASK;
2502         else {
2503                 /* FW BUG! */
2504                 BNX2X_ERR("FW failed to respond!\n");
2505                 bnx2x_fw_dump(bp);
2506                 rc = 0;
2507         }
2508
2509         return rc;
2510 }
2511
2512 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2513 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2514 static void bnx2x_set_rx_mode(struct net_device *dev);
2515
2516 static void bnx2x_e1h_disable(struct bnx2x *bp)
2517 {
2518         int port = BP_PORT(bp);
2519         int i;
2520
2521         bp->rx_mode = BNX2X_RX_MODE_NONE;
2522         bnx2x_set_storm_rx_mode(bp);
2523
2524         netif_tx_disable(bp->dev);
2525         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2526
2527         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2528
2529         bnx2x_set_mac_addr_e1h(bp, 0);
2530
2531         for (i = 0; i < MC_HASH_SIZE; i++)
2532                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2533
2534         netif_carrier_off(bp->dev);
2535 }
2536
2537 static void bnx2x_e1h_enable(struct bnx2x *bp)
2538 {
2539         int port = BP_PORT(bp);
2540
2541         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2542
2543         bnx2x_set_mac_addr_e1h(bp, 1);
2544
2545         /* Only the Tx queues need to be re-enabled */
2546         netif_tx_wake_all_queues(bp->dev);
2547
2548         /* Initialize the receive filter. */
2549         bnx2x_set_rx_mode(bp->dev);
2550 }
2551
2552 static void bnx2x_update_min_max(struct bnx2x *bp)
2553 {
2554         int port = BP_PORT(bp);
2555         int vn, i;
2556
2557         /* Init rate shaping and fairness contexts */
2558         bnx2x_init_port_minmax(bp);
2559
2560         bnx2x_calc_vn_weight_sum(bp);
2561
2562         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2563                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2564
2565         if (bp->port.pmf) {
2566                 int func;
2567
2568                 /* Set the attention towards other drivers on the same port */
2569                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2570                         if (vn == BP_E1HVN(bp))
2571                                 continue;
2572
2573                         func = ((vn << 1) | port);
2574                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2575                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2576                 }
2577
2578                 /* Store it to internal memory */
2579                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2580                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2581                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2582                                ((u32 *)(&bp->cmng))[i]);
2583         }
2584 }
2585
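/* Handle a Device Control Channel event from the MCP: the management
 * firmware may ask the driver to disable/enable this PF or to refresh its
 * min/max bandwidth allocation at runtime.
 */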
2586 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2587 {
2588         int func = BP_FUNC(bp);
2589
2590         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2591         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2592
2593         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2594
2595                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2596                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2597                         bp->state = BNX2X_STATE_DISABLED;
2598
2599                         bnx2x_e1h_disable(bp);
2600                 } else {
2601                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2602                         bp->state = BNX2X_STATE_OPEN;
2603
2604                         bnx2x_e1h_enable(bp);
2605                 }
2606                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2607         }
2608         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2609
2610                 bnx2x_update_min_max(bp);
2611                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2612         }
2613
2614         /* Report results to MCP */
2615         if (dcc_event)
2616                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2617         else
2618                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2619 }
2620
2621 /* the slow path queue is odd since completions arrive on the fastpath ring */
2622 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2623                          u32 data_hi, u32 data_lo, int common)
2624 {
2625         int func = BP_FUNC(bp);
2626
2627         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2628            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2629            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2630            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2631            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2632
2633 #ifdef BNX2X_STOP_ON_ERROR
2634         if (unlikely(bp->panic))
2635                 return -EIO;
2636 #endif
2637
2638         spin_lock_bh(&bp->spq_lock);
2639
2640         if (!bp->spq_left) {
2641                 BNX2X_ERR("BUG! SPQ ring full!\n");
2642                 spin_unlock_bh(&bp->spq_lock);
2643                 bnx2x_panic();
2644                 return -EBUSY;
2645         }
2646
2647         /* CID needs the port number to be encoded in it */
2648         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2649                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2650                                      HW_CID(bp, cid)));
2651         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2652         if (common)
2653                 bp->spq_prod_bd->hdr.type |=
2654                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2655
2656         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2657         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2658
2659         bp->spq_left--;
2660
2661         if (bp->spq_prod_bd == bp->spq_last_bd) {
2662                 bp->spq_prod_bd = bp->spq;
2663                 bp->spq_prod_idx = 0;
2664                 DP(NETIF_MSG_TIMER, "end of spq\n");
2665
2666         } else {
2667                 bp->spq_prod_bd++;
2668                 bp->spq_prod_idx++;
2669         }
2670
2671         /* Make sure that BD data is updated before writing the producer */
2672         wmb();
2673
2674         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2675                bp->spq_prod_idx);
2676
2677         mmiowb();
2678
2679         spin_unlock_bh(&bp->spq_lock);
2680         return 0;
2681 }
2682
2683 /* acquire split MCP access lock register */
2684 static int bnx2x_acquire_alr(struct bnx2x *bp)
2685 {
2686         u32 i, j, val;
2687         int rc = 0;
2688
2689         might_sleep();
2690         i = 100;
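        /* Take the lock by setting bit 31 of the lock register; ownership
         * is confirmed when the bit reads back as set. Retry every 5ms for
         * up to 5 seconds.
         */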
2691         for (j = 0; j < i*10; j++) {
2692                 val = (1UL << 31);
2693                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2694                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2695                 if (val & (1L << 31))
2696                         break;
2697
2698                 msleep(5);
2699         }
2700         if (!(val & (1L << 31))) {
2701                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2702                 rc = -EBUSY;
2703         }
2704
2705         return rc;
2706 }
2707
2708 /* release split MCP access lock register */
2709 static void bnx2x_release_alr(struct bnx2x *bp)
2710 {
2711         u32 val = 0;
2712
2713         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2714 }
2715
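/* Return a bitmask of which default SB indices have changed:
 * 1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM.
 */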
2716 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2717 {
2718         struct host_def_status_block *def_sb = bp->def_status_blk;
2719         u16 rc = 0;
2720
2721         barrier(); /* status block is written to by the chip */
2722         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2723                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2724                 rc |= 1;
2725         }
2726         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2727                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2728                 rc |= 2;
2729         }
2730         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2731                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2732                 rc |= 4;
2733         }
2734         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2735                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2736                 rc |= 8;
2737         }
2738         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2739                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2740                 rc |= 16;
2741         }
2742         return rc;
2743 }
2744
2745 /*
2746  * slow path service functions
2747  */
2748
2749 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2750 {
2751         int port = BP_PORT(bp);
2752         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2753                        COMMAND_REG_ATTN_BITS_SET);
2754         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2755                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2756         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2757                                        NIG_REG_MASK_INTERRUPT_PORT0;
2758         u32 aeu_mask;
2759         u32 nig_mask = 0;
2760
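        /* A bit being asserted while already recorded in attn_state means
         * the IGU delivered the same assertion twice - flag it as an error.
         */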
2761         if (bp->attn_state & asserted)
2762                 BNX2X_ERR("IGU ERROR\n");
2763
2764         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2765         aeu_mask = REG_RD(bp, aeu_addr);
2766
2767         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2768            aeu_mask, asserted);
2769         aeu_mask &= ~(asserted & 0xff);
2770         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2771
2772         REG_WR(bp, aeu_addr, aeu_mask);
2773         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774
2775         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2776         bp->attn_state |= asserted;
2777         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2778
2779         if (asserted & ATTN_HARD_WIRED_MASK) {
2780                 if (asserted & ATTN_NIG_FOR_FUNC) {
2781
2782                         bnx2x_acquire_phy_lock(bp);
2783
2784                         /* save nig interrupt mask */
2785                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2786                         REG_WR(bp, nig_int_mask_addr, 0);
2787
2788                         bnx2x_link_attn(bp);
2789
2790                         /* handle unicore attn? */
2791                 }
2792                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2793                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2794
2795                 if (asserted & GPIO_2_FUNC)
2796                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2797
2798                 if (asserted & GPIO_3_FUNC)
2799                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2800
2801                 if (asserted & GPIO_4_FUNC)
2802                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2803
2804                 if (port == 0) {
2805                         if (asserted & ATTN_GENERAL_ATTN_1) {
2806                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2807                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2808                         }
2809                         if (asserted & ATTN_GENERAL_ATTN_2) {
2810                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2811                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2812                         }
2813                         if (asserted & ATTN_GENERAL_ATTN_3) {
2814                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2815                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2816                         }
2817                 } else {
2818                         if (asserted & ATTN_GENERAL_ATTN_4) {
2819                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2820                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2821                         }
2822                         if (asserted & ATTN_GENERAL_ATTN_5) {
2823                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2824                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2825                         }
2826                         if (asserted & ATTN_GENERAL_ATTN_6) {
2827                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2828                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2829                         }
2830                 }
2831
2832         } /* if hardwired */
2833
2834         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2835            asserted, hc_addr);
2836         REG_WR(bp, hc_addr, asserted);
2837
2838         /* now set back the mask */
2839         if (asserted & ATTN_NIG_FOR_FUNC) {
2840                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2841                 bnx2x_release_phy_lock(bp);
2842         }
2843 }
2844
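/*
 * bnx2x_fan_failure - mark a fan failure: the external PHY type in
 * link_params and in the port HW configuration in shared memory is
 * overwritten with PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE, and the
 * failure is logged.
 */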
2845 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2846 {
2847         int port = BP_PORT(bp);
2848
2849         /* mark the failure */
2850         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2851         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2852         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2853                  bp->link_params.ext_phy_config);
2854
2855         /* log the failure */
2856         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2857                " the driver to shut down the card to prevent permanent"
2858                " damage.  Please contact Dell Support for assistance\n",
2859                bp->dev->name);
2860 }
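
/*
 * bnx2x_attn_int_deasserted0..3 below each service one of the four
 * 32-bit attention signal words.  A bit set in
 * HW_INTERRUT_ASSERT_SET_{0,1,2} is a fatal HW block attention: its
 * AEU enable bit is cleared and the driver panics.
 */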
2861 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2862 {
2863         int port = BP_PORT(bp);
2864         int reg_offset;
2865         u32 val, swap_val, swap_override;
2866
2867         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2868                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2869
2870         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2871
2872                 val = REG_RD(bp, reg_offset);
2873                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2874                 REG_WR(bp, reg_offset, val);
2875
2876                 BNX2X_ERR("SPIO5 hw attention\n");
2877
2878                 /* Fan failure attention */
2879                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2880                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2881                         /* Low power mode is controlled by GPIO 2 */
2882                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2883                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2884                         /* The PHY reset is controlled by GPIO 1 */
2885                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2886                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2887                         break;
2888
2889                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2890                         /* The PHY reset is controlled by GPIO 1 */
2891                         /* fake the port number to cancel the swap done in
2892                            set_gpio() */
2893                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2894                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2895                         port = (swap_val && swap_override) ^ 1;
2896                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2897                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2898                         break;
2899
2900                 default:
2901                         break;
2902                 }
2903                 bnx2x_fan_failure(bp);
2904         }
2905
2906         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2907                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2908                 bnx2x_acquire_phy_lock(bp);
2909                 bnx2x_handle_module_detect_int(&bp->link_params);
2910                 bnx2x_release_phy_lock(bp);
2911         }
2912
2913         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2914
2915                 val = REG_RD(bp, reg_offset);
2916                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2917                 REG_WR(bp, reg_offset, val);
2918
2919                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2920                           (attn & HW_INTERRUT_ASSERT_SET_0));
2921                 bnx2x_panic();
2922         }
2923 }
2924
2925 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2926 {
2927         u32 val;
2928
2929         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2930
2931                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2932                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2933                 /* DORQ discard attention */
2934                 if (val & 0x2)
2935                         BNX2X_ERR("FATAL error from DORQ\n");
2936         }
2937
2938         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2939
2940                 int port = BP_PORT(bp);
2941                 int reg_offset;
2942
2943                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2944                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2945
2946                 val = REG_RD(bp, reg_offset);
2947                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2948                 REG_WR(bp, reg_offset, val);
2949
2950                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2951                           (attn & HW_INTERRUT_ASSERT_SET_1));
2952                 bnx2x_panic();
2953         }
2954 }
2955
2956 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2957 {
2958         u32 val;
2959
2960         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2961
2962                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2963                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2964                 /* CFC error attention */
2965                 if (val & 0x2)
2966                         BNX2X_ERR("FATAL error from CFC\n");
2967         }
2968
2969         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2970
2971                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2972                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2973                 /* RQ_USDMDP_FIFO_OVERFLOW */
2974                 if (val & 0x18000)
2975                         BNX2X_ERR("FATAL error from PXP\n");
2976         }
2977
2978         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2979
2980                 int port = BP_PORT(bp);
2981                 int reg_offset;
2982
2983                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2985
2986                 val = REG_RD(bp, reg_offset);
2987                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988                 REG_WR(bp, reg_offset, val);
2989
2990                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2991                           (attn & HW_INTERRUT_ASSERT_SET_2));
2992                 bnx2x_panic();
2993         }
2994 }
2995
2996 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2997 {
2998         u32 val;
2999
3000         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3001
3002                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003                         int func = BP_FUNC(bp);
3004
3005                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3006                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3007                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3008                                 bnx2x_dcc_event(bp,
3009                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3010                         bnx2x__link_status_update(bp);
3011                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3012                                 bnx2x_pmf_update(bp);
3013
3014                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3015
3016                         BNX2X_ERR("MC assert!\n");
3017                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3018                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3019                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3020                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3021                         bnx2x_panic();
3022
3023                 } else if (attn & BNX2X_MCP_ASSERT) {
3024
3025                         BNX2X_ERR("MCP assert!\n");
3026                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3027                         bnx2x_fw_dump(bp);
3028
3029                 } else
3030                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3031         }
3032
3033         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3034                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3035                 if (attn & BNX2X_GRC_TIMEOUT) {
3036                         val = CHIP_IS_E1H(bp) ?
3037                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3038                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3039                 }
3040                 if (attn & BNX2X_GRC_RSV) {
3041                         val = CHIP_IS_E1H(bp) ?
3042                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3043                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3044                 }
3045                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3046         }
3047 }
3048
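/*
 * bnx2x_attn_int_deasserted - under the ALR hardware lock (the MCP or
 * the other port may be handling the same event), read the
 * after-invert AEU signal registers, run the deasserted0..3 handlers
 * for every dynamic attention group that deasserted, then clear the
 * bits at the HC and unmask the AEU lines again.
 */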
3049 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3050 {
3051         struct attn_route attn;
3052         struct attn_route group_mask;
3053         int port = BP_PORT(bp);
3054         int index;
3055         u32 reg_addr;
3056         u32 val;
3057         u32 aeu_mask;
3058
3059         /* need to take HW lock because MCP or other port might also
3060            try to handle this event */
3061         bnx2x_acquire_alr(bp);
3062
3063         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3064         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3065         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3066         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3067         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3068            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3069
3070         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3071                 if (deasserted & (1 << index)) {
3072                         group_mask = bp->attn_group[index];
3073
3074                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3075                            index, group_mask.sig[0], group_mask.sig[1],
3076                            group_mask.sig[2], group_mask.sig[3]);
3077
3078                         bnx2x_attn_int_deasserted3(bp,
3079                                         attn.sig[3] & group_mask.sig[3]);
3080                         bnx2x_attn_int_deasserted1(bp,
3081                                         attn.sig[1] & group_mask.sig[1]);
3082                         bnx2x_attn_int_deasserted2(bp,
3083                                         attn.sig[2] & group_mask.sig[2]);
3084                         bnx2x_attn_int_deasserted0(bp,
3085                                         attn.sig[0] & group_mask.sig[0]);
3086
3087                         if ((attn.sig[0] & group_mask.sig[0] &
3088                                                 HW_PRTY_ASSERT_SET_0) ||
3089                             (attn.sig[1] & group_mask.sig[1] &
3090                                                 HW_PRTY_ASSERT_SET_1) ||
3091                             (attn.sig[2] & group_mask.sig[2] &
3092                                                 HW_PRTY_ASSERT_SET_2))
3093                                 BNX2X_ERR("FATAL HW block parity attention\n");
3094                 }
3095         }
3096
3097         bnx2x_release_alr(bp);
3098
3099         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3100
3101         val = ~deasserted;
3102         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3103            val, reg_addr);
3104         REG_WR(bp, reg_addr, val);
3105
3106         if (~bp->attn_state & deasserted)
3107                 BNX2X_ERR("IGU ERROR\n");
3108
3109         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3110                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3111
3112         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3113         aeu_mask = REG_RD(bp, reg_addr);
3114
3115         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3116            aeu_mask, deasserted);
3117         aeu_mask |= (deasserted & 0xff);
3118         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3119
3120         REG_WR(bp, reg_addr, aeu_mask);
3121         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3122
3123         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3124         bp->attn_state &= ~deasserted;
3125         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3126 }
3127
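/*
 * bnx2x_attn_int - dispatch attention interrupts.  A bit is newly
 * asserted when set in attn_bits but in neither attn_bits_ack nor the
 * driver's attn_state, and newly deasserted in the opposite case.
 * Example: attn_bits = 0x5, attn_ack = 0x1, attn_state = 0x1 yields
 * asserted = 0x4 and deasserted = 0.
 */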
3128 static void bnx2x_attn_int(struct bnx2x *bp)
3129 {
3130         /* read local copy of bits */
3131         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132                                                                 attn_bits);
3133         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3134                                                                 attn_bits_ack);
3135         u32 attn_state = bp->attn_state;
3136
3137         /* look for changed bits */
3138         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3139         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3140
3141         DP(NETIF_MSG_HW,
3142            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3143            attn_bits, attn_ack, asserted, deasserted);
3144
3145         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3146                 BNX2X_ERR("BAD attention state\n");
3147
3148         /* handle bits that were raised */
3149         if (asserted)
3150                 bnx2x_attn_int_asserted(bp, asserted);
3151
3152         if (deasserted)
3153                 bnx2x_attn_int_deasserted(bp, deasserted);
3154 }
3155
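/*
 * bnx2x_sp_task - slow path work handler.  After handling HW
 * attentions, each default status block index is acknowledged with
 * IGU_INT_NOP; only the final ack (TSTORM) uses IGU_INT_ENABLE, which
 * re-enables the slow path interrupt that bnx2x_msix_sp_int() had
 * disabled.
 */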
3156 static void bnx2x_sp_task(struct work_struct *work)
3157 {
3158         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3159         u16 status;
3160
3162         /* Return here if interrupt is disabled */
3163         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3164                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3165                 return;
3166         }
3167
3168         status = bnx2x_update_dsb_idx(bp);
3169 /*      if (status == 0)                                     */
3170 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3171
3172         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3173
3174         /* HW attentions */
3175         if (status & 0x1)
3176                 bnx2x_attn_int(bp);
3177
3178         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3179                      IGU_INT_NOP, 1);
3180         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3181                      IGU_INT_NOP, 1);
3182         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3183                      IGU_INT_NOP, 1);
3184         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3185                      IGU_INT_NOP, 1);
3186         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3187                      IGU_INT_ENABLE, 1);
3189 }
3190
3191 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3192 {
3193         struct net_device *dev = dev_instance;
3194         struct bnx2x *bp = netdev_priv(dev);
3195
3196         /* Return here if interrupt is disabled */
3197         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3198                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3199                 return IRQ_HANDLED;
3200         }
3201
3202         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3203
3204 #ifdef BNX2X_STOP_ON_ERROR
3205         if (unlikely(bp->panic))
3206                 return IRQ_HANDLED;
3207 #endif
3208
3209         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3210
3211         return IRQ_HANDLED;
3212 }
3213
3214 /* end of slow path */
3215
3216 /* Statistics */
3217
3218 /****************************************************************************
3219 * Macros
3220 ****************************************************************************/
3221
3222 /* sum[hi:lo] += add[hi:lo] */
3223 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3224         do { \
3225                 s_lo += a_lo; \
3226                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3227         } while (0)
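/* The carry test relies on unsigned wraparound: if the 32-bit sum
 * s_lo wrapped, it is necessarily smaller than the addend, e.g.
 * 0xffffffff + 0x2 wraps to 0x1 < 0x2, so 1 is carried into s_hi.
 */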
3228
3229 /* difference = minuend - subtrahend */
3230 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3231         do { \
3232                 if (m_lo < s_lo) { \
3233                         /* underflow */ \
3234                         d_hi = m_hi - s_hi; \
3235                         if (d_hi > 0) { \
3236                                 /* we can 'loan' 1 */ \
3237                                 d_hi--; \
3238                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3239                         } else { \
3240                                 /* m_hi <= s_hi */ \
3241                                 d_hi = 0; \
3242                                 d_lo = 0; \
3243                         } \
3244                 } else { \
3245                         /* m_lo >= s_lo */ \
3246                         if (m_hi < s_hi) { \
3247                                 d_hi = 0; \
3248                                 d_lo = 0; \
3249                         } else { \
3250                                 /* m_hi >= s_hi */ \
3251                                 d_hi = m_hi - s_hi; \
3252                                 d_lo = m_lo - s_lo; \
3253                         } \
3254                 } \
3255         } while (0)
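/* Borrow example: with m_hi = 2, m_lo = 1 and s_hi = 1, s_lo = 2,
 * m_lo < s_lo so 1 is loaned from the high word: d_hi = 2 - 1 - 1 = 0
 * and d_lo = 1 + (UINT_MAX - 2) + 1 = 0xffffffff, the correct 64-bit
 * difference.  If the subtrahend exceeds the minuend, the result is
 * clamped to zero instead of underflowing.
 */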
3256
3257 #define UPDATE_STAT64(s, t) \
3258         do { \
3259                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3260                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3261                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3262                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3263                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3264                        pstats->mac_stx[1].t##_lo, diff.lo); \
3265         } while (0)
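/* UPDATE_STAT64 keeps two copies per counter: mac_stx[0] holds the
 * last raw MAC snapshot and mac_stx[1] the accumulated total, so the
 * total keeps growing even if the hardware counter is reset (DIFF_64
 * clamps a negative delta to zero).
 */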
3266
3267 #define UPDATE_STAT64_NIG(s, t) \
3268         do { \
3269                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3270                         diff.lo, new->s##_lo, old->s##_lo); \
3271                 ADD_64(estats->t##_hi, diff.hi, \
3272                        estats->t##_lo, diff.lo); \
3273         } while (0)
3274
3275 /* sum[hi:lo] += add */
3276 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3277         do { \
3278                 s_lo += a; \
3279                 s_hi += (s_lo < a) ? 1 : 0; \
3280         } while (0)
3281
3282 #define UPDATE_EXTEND_STAT(s) \
3283         do { \
3284                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3285                               pstats->mac_stx[1].s##_lo, \
3286                               new->s); \
3287         } while (0)
3288
3289 #define UPDATE_EXTEND_TSTAT(s, t) \
3290         do { \
3291                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3292                 old_tclient->s = tclient->s; \
3293                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3294         } while (0)
3295
3296 #define UPDATE_EXTEND_USTAT(s, t) \
3297         do { \
3298                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3299                 old_uclient->s = uclient->s; \
3300                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3301         } while (0)
3302
3303 #define UPDATE_EXTEND_XSTAT(s, t) \
3304         do { \
3305                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3306                 old_xclient->s = xclient->s; \
3307                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3308         } while (0)
3309
3310 /* minuend -= subtrahend */
3311 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3312         do { \
3313                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3314         } while (0)
3315
3316 /* minuend[hi:lo] -= subtrahend */
3317 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3318         do { \
3319                 SUB_64(m_hi, 0, m_lo, s); \
3320         } while (0)
3321
3322 #define SUB_EXTEND_USTAT(s, t) \
3323         do { \
3324                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3325                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3326         } while (0)
3327
3328 /*
3329  * General service functions
3330  */
3331
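/*
 * bnx2x_hilo - return a [hi:lo] counter pair as a long: the full
 * 64-bit value on 64-bit platforms, only the low 32 bits on 32-bit
 * ones (where the long counters in net_device_stats cannot hold more
 * anyway).
 */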
3332 static inline long bnx2x_hilo(u32 *hiref)
3333 {
3334         u32 lo = *(hiref + 1);
3335 #if (BITS_PER_LONG == 64)
3336         u32 hi = *hiref;
3337
3338         return HILO_U64(hi, lo);
3339 #else
3340         return lo;
3341 #endif
3342 }
3343
3344 /*
3345  * Init service functions
3346  */
3347
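/*
 * bnx2x_storm_stats_post - post a STAT_QUERY ramrod asking the storm
 * FW to collect statistics for every client id in ctr_id_vector.  The
 * drv_counter sent along lets bnx2x_storm_stats_update() reject stale
 * replies.  The ramrod has a reserved slot on the slowpath queue, so
 * spq_left is given back after a successful post.
 */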
3348 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3349 {
3350         if (!bp->stats_pending) {
3351                 struct eth_query_ramrod_data ramrod_data = {0};
3352                 int i, rc;
3353
3354                 ramrod_data.drv_counter = bp->stats_counter++;
3355                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3356                 for_each_queue(bp, i)
3357                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3358
3359                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3360                                    ((u32 *)&ramrod_data)[1],
3361                                    ((u32 *)&ramrod_data)[0], 0);
3362                 if (rc == 0) {
3363                         /* stats ramrod has its own slot on the spq */
3364                         bp->spq_left++;
3365                         bp->stats_pending = 1;
3366                 }
3367         }
3368 }
3369
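/*
 * bnx2x_stats_init - reset the statistics state: re-read the port
 * stats location from shared memory, snapshot the NIG counters as the
 * baseline for later deltas, and zero all old per-client copies and
 * accumulated queue/device statistics.
 */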
3370 static void bnx2x_stats_init(struct bnx2x *bp)
3371 {
3372         int port = BP_PORT(bp);
3373         int i;
3374
3375         bp->stats_pending = 0;
3376         bp->executer_idx = 0;
3377         bp->stats_counter = 0;
3378
3379         /* port stats */
3380         if (!BP_NOMCP(bp))
3381                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3382         else
3383                 bp->port.port_stx = 0;
3384         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3385
3386         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3387         bp->port.old_nig_stats.brb_discard =
3388                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3389         bp->port.old_nig_stats.brb_truncate =
3390                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3391         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3392                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3393         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3394                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3395
3396         /* function stats */
3397         for_each_queue(bp, i) {
3398                 struct bnx2x_fastpath *fp = &bp->fp[i];
3399
3400                 memset(&fp->old_tclient, 0,
3401                        sizeof(struct tstorm_per_client_stats));
3402                 memset(&fp->old_uclient, 0,
3403                        sizeof(struct ustorm_per_client_stats));
3404                 memset(&fp->old_xclient, 0,
3405                        sizeof(struct xstorm_per_client_stats));
3406                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3407         }
3408
3409         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3410         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3411
3412         bp->stats_state = STATS_STATE_DISABLED;
3413         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3414                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3415 }
3416
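/*
 * bnx2x_hw_stats_post - start the prepared DMAE transfers.  When a
 * chain was built (executer_idx != 0), a loader command is set up
 * that copies the first prepared command into DMAE command memory
 * slot loader_idx + 1 and, on completion, writes 1 to that slot's GO
 * register to start it; otherwise the single function-stats command
 * is posted directly.
 */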
3417 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3418 {
3419         struct dmae_command *dmae = &bp->stats_dmae;
3420         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3421
3422         *stats_comp = DMAE_COMP_VAL;
3423         if (CHIP_REV_IS_SLOW(bp))
3424                 return;
3425
3426         /* loader */
3427         if (bp->executer_idx) {
3428                 int loader_idx = PMF_DMAE_C(bp);
3429
3430                 memset(dmae, 0, sizeof(struct dmae_command));
3431
3432                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3433                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3434                                 DMAE_CMD_DST_RESET |
3435 #ifdef __BIG_ENDIAN
3436                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3437 #else
3438                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3439 #endif
3440                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3441                                                DMAE_CMD_PORT_0) |
3442                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3443                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3444                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3445                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3446                                      sizeof(struct dmae_command) *
3447                                      (loader_idx + 1)) >> 2;
3448                 dmae->dst_addr_hi = 0;
3449                 dmae->len = sizeof(struct dmae_command) >> 2;
3450                 if (CHIP_IS_E1(bp))
3451                         dmae->len--;
3452                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3453                 dmae->comp_addr_hi = 0;
3454                 dmae->comp_val = 1;
3455
3456                 *stats_comp = 0;
3457                 bnx2x_post_dmae(bp, dmae, loader_idx);
3458
3459         } else if (bp->func_stx) {
3460                 *stats_comp = 0;
3461                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3462         }
3463 }
3464
3465 static int bnx2x_stats_comp(struct bnx2x *bp)
3466 {
3467         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3468         int cnt = 10;
3469
3470         might_sleep();
3471         while (*stats_comp != DMAE_COMP_VAL) {
3472                 if (!cnt) {
3473                         BNX2X_ERR("timeout waiting for stats to finish\n");
3474                         break;
3475                 }
3476                 cnt--;
3477                 msleep(1);
3478         }
3479         return 1;
3480 }
3481
3482 /*
3483  * Statistics service functions
3484  */
3485
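/*
 * bnx2x_stats_pmf_update - DMA the host_port_stats block at port_stx
 * from shared memory into the driver.  The block is larger than one
 * DMAE read allows, so it is fetched in two chunks of at most
 * DMAE_LEN32_RD_MAX dwords each, and the call waits for completion.
 */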
3486 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3487 {
3488         struct dmae_command *dmae;
3489         u32 opcode;
3490         int loader_idx = PMF_DMAE_C(bp);
3491         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3492
3493         /* sanity */
3494         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3495                 BNX2X_ERR("BUG!\n");
3496                 return;
3497         }
3498
3499         bp->executer_idx = 0;
3500
3501         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3502                   DMAE_CMD_C_ENABLE |
3503                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3504 #ifdef __BIG_ENDIAN
3505                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3506 #else
3507                   DMAE_CMD_ENDIANITY_DW_SWAP |
3508 #endif
3509                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3510                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3511
3512         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3513         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3514         dmae->src_addr_lo = bp->port.port_stx >> 2;
3515         dmae->src_addr_hi = 0;
3516         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3517         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3518         dmae->len = DMAE_LEN32_RD_MAX;
3519         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3520         dmae->comp_addr_hi = 0;
3521         dmae->comp_val = 1;
3522
3523         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3524         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3525         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3526         dmae->src_addr_hi = 0;
3527         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3528                                    DMAE_LEN32_RD_MAX * 4);
3529         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3530                                    DMAE_LEN32_RD_MAX * 4);
3531         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3532         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3533         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3534         dmae->comp_val = DMAE_COMP_VAL;
3535
3536         *stats_comp = 0;
3537         bnx2x_hw_stats_post(bp);
3538         bnx2x_stats_comp(bp);
3539 }
3540
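/*
 * bnx2x_port_stats_init - build the PMF DMAE chain: copy the host
 * port/function stats out to shared memory, then pull the MAC (BMAC
 * or EMAC, depending on the active link) and NIG counters into host
 * memory.  Only the final command completes to stats_comp; the
 * earlier ones chain through the DMAE GO registers.
 */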
3541 static void bnx2x_port_stats_init(struct bnx2x *bp)
3542 {
3543         struct dmae_command *dmae;
3544         int port = BP_PORT(bp);
3545         int vn = BP_E1HVN(bp);
3546         u32 opcode;
3547         int loader_idx = PMF_DMAE_C(bp);
3548         u32 mac_addr;
3549         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3550
3551         /* sanity */
3552         if (!bp->link_vars.link_up || !bp->port.pmf) {
3553                 BNX2X_ERR("BUG!\n");
3554                 return;
3555         }
3556
3557         bp->executer_idx = 0;
3558
3559         /* MCP */
3560         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3561                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3562                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3563 #ifdef __BIG_ENDIAN
3564                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3565 #else
3566                   DMAE_CMD_ENDIANITY_DW_SWAP |
3567 #endif
3568                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3569                   (vn << DMAE_CMD_E1HVN_SHIFT));
3570
3571         if (bp->port.port_stx) {
3572
3573                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3574                 dmae->opcode = opcode;
3575                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3576                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3577                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3578                 dmae->dst_addr_hi = 0;
3579                 dmae->len = sizeof(struct host_port_stats) >> 2;
3580                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3581                 dmae->comp_addr_hi = 0;
3582                 dmae->comp_val = 1;
3583         }
3584
3585         if (bp->func_stx) {
3586
3587                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3588                 dmae->opcode = opcode;
3589                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3590                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3591                 dmae->dst_addr_lo = bp->func_stx >> 2;
3592                 dmae->dst_addr_hi = 0;
3593                 dmae->len = sizeof(struct host_func_stats) >> 2;
3594                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3595                 dmae->comp_addr_hi = 0;
3596                 dmae->comp_val = 1;
3597         }
3598
3599         /* MAC */
3600         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3601                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3602                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3603 #ifdef __BIG_ENDIAN
3604                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3605 #else
3606                   DMAE_CMD_ENDIANITY_DW_SWAP |
3607 #endif
3608                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3609                   (vn << DMAE_CMD_E1HVN_SHIFT));
3610
3611         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3612
3613                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3614                                    NIG_REG_INGRESS_BMAC0_MEM);
3615
3616                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3617                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3618                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3619                 dmae->opcode = opcode;
3620                 dmae->src_addr_lo = (mac_addr +
3621                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3622                 dmae->src_addr_hi = 0;
3623                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3624                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3625                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3626                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3627                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3628                 dmae->comp_addr_hi = 0;
3629                 dmae->comp_val = 1;
3630
3631                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3632                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3633                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3634                 dmae->opcode = opcode;
3635                 dmae->src_addr_lo = (mac_addr +
3636                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3637                 dmae->src_addr_hi = 0;
3638                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3639                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3640                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3641                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3642                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3643                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3644                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3645                 dmae->comp_addr_hi = 0;
3646                 dmae->comp_val = 1;
3647
3648         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3649
3650                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3651
3652                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3653                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654                 dmae->opcode = opcode;
3655                 dmae->src_addr_lo = (mac_addr +
3656                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3657                 dmae->src_addr_hi = 0;
3658                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3661                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3662                 dmae->comp_addr_hi = 0;
3663                 dmae->comp_val = 1;
3664
3665                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3666                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3667                 dmae->opcode = opcode;
3668                 dmae->src_addr_lo = (mac_addr +
3669                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3670                 dmae->src_addr_hi = 0;
3671                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3672                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3673                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3674                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3675                 dmae->len = 1;
3676                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3677                 dmae->comp_addr_hi = 0;
3678                 dmae->comp_val = 1;
3679
3680                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3681                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3682                 dmae->opcode = opcode;
3683                 dmae->src_addr_lo = (mac_addr +
3684                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3685                 dmae->src_addr_hi = 0;
3686                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3687                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3688                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3689                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3690                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3691                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3692                 dmae->comp_addr_hi = 0;
3693                 dmae->comp_val = 1;
3694         }
3695
3696         /* NIG */
3697         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698         dmae->opcode = opcode;
3699         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3700                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3701         dmae->src_addr_hi = 0;
3702         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3703         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3704         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3705         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706         dmae->comp_addr_hi = 0;
3707         dmae->comp_val = 1;
3708
3709         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710         dmae->opcode = opcode;
3711         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3712                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3713         dmae->src_addr_hi = 0;
3714         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3715                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3716         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3717                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3718         dmae->len = (2*sizeof(u32)) >> 2;
3719         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720         dmae->comp_addr_hi = 0;
3721         dmae->comp_val = 1;
3722
3723         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3724         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3725                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3726                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3727 #ifdef __BIG_ENDIAN
3728                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3729 #else
3730                         DMAE_CMD_ENDIANITY_DW_SWAP |
3731 #endif
3732                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3733                         (vn << DMAE_CMD_E1HVN_SHIFT));
3734         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3735                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3736         dmae->src_addr_hi = 0;
3737         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3738                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3739         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3740                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3741         dmae->len = (2*sizeof(u32)) >> 2;
3742         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3743         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3744         dmae->comp_val = DMAE_COMP_VAL;
3745
3746         *stats_comp = 0;
3747 }
3748
3749 static void bnx2x_func_stats_init(struct bnx2x *bp)
3750 {
3751         struct dmae_command *dmae = &bp->stats_dmae;
3752         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3753
3754         /* sanity */
3755         if (!bp->func_stx) {
3756                 BNX2X_ERR("BUG!\n");
3757                 return;
3758         }
3759
3760         bp->executer_idx = 0;
3761         memset(dmae, 0, sizeof(struct dmae_command));
3762
3763         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3764                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3765                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3766 #ifdef __BIG_ENDIAN
3767                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3768 #else
3769                         DMAE_CMD_ENDIANITY_DW_SWAP |
3770 #endif
3771                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3772                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3773         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3774         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3775         dmae->dst_addr_lo = bp->func_stx >> 2;
3776         dmae->dst_addr_hi = 0;
3777         dmae->len = sizeof(struct host_func_stats) >> 2;
3778         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3779         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3780         dmae->comp_val = DMAE_COMP_VAL;
3781
3782         *stats_comp = 0;
3783 }
3784
3785 static void bnx2x_stats_start(struct bnx2x *bp)
3786 {
3787         if (bp->port.pmf)
3788                 bnx2x_port_stats_init(bp);
3789
3790         else if (bp->func_stx)
3791                 bnx2x_func_stats_init(bp);
3792
3793         bnx2x_hw_stats_post(bp);
3794         bnx2x_storm_stats_post(bp);
3795 }
3796
3797 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3798 {
3799         bnx2x_stats_comp(bp);
3800         bnx2x_stats_pmf_update(bp);
3801         bnx2x_stats_start(bp);
3802 }
3803
3804 static void bnx2x_stats_restart(struct bnx2x *bp)
3805 {
3806         bnx2x_stats_comp(bp);
3807         bnx2x_stats_start(bp);
3808 }
3809
3810 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3811 {
3812         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3813         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3814         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3815         struct {
3816                 u32 lo;
3817                 u32 hi;
3818         } diff;
3819
3820         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3821         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3822         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3823         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3824         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3825         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3826         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3827         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3828         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3829         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3830         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3831         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3832         UPDATE_STAT64(tx_stat_gt127,
3833                                 tx_stat_etherstatspkts65octetsto127octets);
3834         UPDATE_STAT64(tx_stat_gt255,
3835                                 tx_stat_etherstatspkts128octetsto255octets);
3836         UPDATE_STAT64(tx_stat_gt511,
3837                                 tx_stat_etherstatspkts256octetsto511octets);
3838         UPDATE_STAT64(tx_stat_gt1023,
3839                                 tx_stat_etherstatspkts512octetsto1023octets);
3840         UPDATE_STAT64(tx_stat_gt1518,
3841                                 tx_stat_etherstatspkts1024octetsto1522octets);
3842         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3843         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3844         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3845         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3846         UPDATE_STAT64(tx_stat_gterr,
3847                                 tx_stat_dot3statsinternalmactransmiterrors);
3848         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3849
3850         estats->pause_frames_received_hi =
3851                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3852         estats->pause_frames_received_lo =
3853                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3854
3855         estats->pause_frames_sent_hi =
3856                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3857         estats->pause_frames_sent_lo =
3858                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3859 }
3860
3861 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3862 {
3863         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3864         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3865         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3866
3867         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3868         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3869         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3870         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3871         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3872         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3873         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3874         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3875         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3876         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3877         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3878         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3879         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3880         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3881         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3882         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3883         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3884         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3885         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3886         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3887         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3888         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3889         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3890         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3891         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3892         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3893         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3894         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3895         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3896         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3897         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3898
3899         estats->pause_frames_received_hi =
3900                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3901         estats->pause_frames_received_lo =
3902                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3903         ADD_64(estats->pause_frames_received_hi,
3904                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3905                estats->pause_frames_received_lo,
3906                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3907
3908         estats->pause_frames_sent_hi =
3909                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3910         estats->pause_frames_sent_lo =
3911                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3912         ADD_64(estats->pause_frames_sent_hi,
3913                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3914                estats->pause_frames_sent_lo,
3915                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3916 }
3917
3918 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3919 {
3920         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3921         struct nig_stats *old = &(bp->port.old_nig_stats);
3922         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3923         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3924         struct {
3925                 u32 lo;
3926                 u32 hi;
3927         } diff;
3928         u32 nig_timer_max;
3929
3930         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3931                 bnx2x_bmac_stats_update(bp);
3932
3933         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3934                 bnx2x_emac_stats_update(bp);
3935
3936         else { /* unreached */
3937                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3938                 return -1;
3939         }
3940
3941         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3942                       new->brb_discard - old->brb_discard);
3943         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3944                       new->brb_truncate - old->brb_truncate);
3945
3946         UPDATE_STAT64_NIG(egress_mac_pkt0,
3947                                         etherstatspkts1024octetsto1522octets);
3948         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3949
3950         memcpy(old, new, sizeof(struct nig_stats));
3951
3952         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3953                sizeof(struct mac_stx));
3954         estats->brb_drop_hi = pstats->brb_drop_hi;
3955         estats->brb_drop_lo = pstats->brb_drop_lo;
3956
3957         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3958
3959         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3960         if (nig_timer_max != estats->nig_timer_max) {
3961                 estats->nig_timer_max = nig_timer_max;
3962                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3963         }
3964
3965         return 0;
3966 }
3967
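/*
 * bnx2x_storm_stats_update - fold the per-client FW statistics into
 * the per-queue and per-function counters.  A client's buffer is
 * valid only if its stats_counter is exactly one behind
 * bp->stats_counter, i.e. it matches the drv_counter sent with the
 * last STAT_QUERY ramrod; e.g. with bp->stats_counter == 7 only
 * buffers stamped 6 are accepted, anything else makes the update
 * bail out with a nonzero return code.
 */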
3968 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3969 {
3970         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3971         struct tstorm_per_port_stats *tport =
3972                                         &stats->tstorm_common.port_statistics;
3973         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3974         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3975         int i;
3976
3977         memset(&(fstats->total_bytes_received_hi), 0,
3978                sizeof(struct host_func_stats) - 2*sizeof(u32));
3979         estats->error_bytes_received_hi = 0;
3980         estats->error_bytes_received_lo = 0;
3981         estats->etherstatsoverrsizepkts_hi = 0;
3982         estats->etherstatsoverrsizepkts_lo = 0;
3983         estats->no_buff_discard_hi = 0;
3984         estats->no_buff_discard_lo = 0;
3985
3986         for_each_rx_queue(bp, i) {
3987                 struct bnx2x_fastpath *fp = &bp->fp[i];
3988                 int cl_id = fp->cl_id;
3989                 struct tstorm_per_client_stats *tclient =
3990                                 &stats->tstorm_common.client_statistics[cl_id];
3991                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3992                 struct ustorm_per_client_stats *uclient =
3993                                 &stats->ustorm_common.client_statistics[cl_id];
3994                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3995                 struct xstorm_per_client_stats *xclient =
3996                                 &stats->xstorm_common.client_statistics[cl_id];
3997                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3998                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3999                 u32 diff;
4000
4001                 /* are storm stats valid? */
4002                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4003                                                         bp->stats_counter) {
4004                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4005                            "  xstorm counter (%d) != stats_counter (%d)\n",
4006                            i, xclient->stats_counter, bp->stats_counter);
4007                         return -1;
4008                 }
4009                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4010                                                         bp->stats_counter) {
4011                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4012                            "  tstorm counter (%d) != stats_counter (%d)\n",
4013                            i, tclient->stats_counter, bp->stats_counter);
4014                         return -2;
4015                 }
4016                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4017                                                         bp->stats_counter) {
4018                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4019                            "  ustorm counter (%d) != stats_counter (%d)\n",
4020                            i, uclient->stats_counter, bp->stats_counter);
4021                         return -4;
4022                 }
4023
4024                 qstats->total_bytes_received_hi =
4025                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4026                 qstats->total_bytes_received_lo =
4027                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4028
4029                 ADD_64(qstats->total_bytes_received_hi,
4030                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4031                        qstats->total_bytes_received_lo,
4032                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4033
4034                 ADD_64(qstats->total_bytes_received_hi,
4035                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4036                        qstats->total_bytes_received_lo,
4037                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4038
4039                 qstats->valid_bytes_received_hi =
4040                                         qstats->total_bytes_received_hi;
4041                 qstats->valid_bytes_received_lo =
4042                                         qstats->total_bytes_received_lo;
4043
4044                 qstats->error_bytes_received_hi =
4045                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4046                 qstats->error_bytes_received_lo =
4047                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4048
4049                 ADD_64(qstats->total_bytes_received_hi,
4050                        qstats->error_bytes_received_hi,
4051                        qstats->total_bytes_received_lo,
4052                        qstats->error_bytes_received_lo);
4053
4054                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4055                                         total_unicast_packets_received);
4056                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4057                                         total_multicast_packets_received);
4058                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4059                                         total_broadcast_packets_received);
4060                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4061                                         etherstatsoverrsizepkts);
4062                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4063
4064                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4065                                         total_unicast_packets_received);
4066                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4067                                         total_multicast_packets_received);
4068                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4069                                         total_broadcast_packets_received);
4070                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4071                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4072                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4073
4074                 qstats->total_bytes_transmitted_hi =
4075                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4076                 qstats->total_bytes_transmitted_lo =
4077                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4078
4079                 ADD_64(qstats->total_bytes_transmitted_hi,
4080                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4081                        qstats->total_bytes_transmitted_lo,
4082                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4083
4084                 ADD_64(qstats->total_bytes_transmitted_hi,
4085                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4086                        qstats->total_bytes_transmitted_lo,
4087                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4088
4089                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4090                                         total_unicast_packets_transmitted);
4091                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4092                                         total_multicast_packets_transmitted);
4093                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4094                                         total_broadcast_packets_transmitted);
4095
4096                 old_tclient->checksum_discard = tclient->checksum_discard;
4097                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4098
4099                 ADD_64(fstats->total_bytes_received_hi,
4100                        qstats->total_bytes_received_hi,
4101                        fstats->total_bytes_received_lo,
4102                        qstats->total_bytes_received_lo);
4103                 ADD_64(fstats->total_bytes_transmitted_hi,
4104                        qstats->total_bytes_transmitted_hi,
4105                        fstats->total_bytes_transmitted_lo,
4106                        qstats->total_bytes_transmitted_lo);
4107                 ADD_64(fstats->total_unicast_packets_received_hi,
4108                        qstats->total_unicast_packets_received_hi,
4109                        fstats->total_unicast_packets_received_lo,
4110                        qstats->total_unicast_packets_received_lo);
4111                 ADD_64(fstats->total_multicast_packets_received_hi,
4112                        qstats->total_multicast_packets_received_hi,
4113                        fstats->total_multicast_packets_received_lo,
4114                        qstats->total_multicast_packets_received_lo);
4115                 ADD_64(fstats->total_broadcast_packets_received_hi,
4116                        qstats->total_broadcast_packets_received_hi,
4117                        fstats->total_broadcast_packets_received_lo,
4118                        qstats->total_broadcast_packets_received_lo);
4119                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4120                        qstats->total_unicast_packets_transmitted_hi,
4121                        fstats->total_unicast_packets_transmitted_lo,
4122                        qstats->total_unicast_packets_transmitted_lo);
4123                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4124                        qstats->total_multicast_packets_transmitted_hi,
4125                        fstats->total_multicast_packets_transmitted_lo,
4126                        qstats->total_multicast_packets_transmitted_lo);
4127                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4128                        qstats->total_broadcast_packets_transmitted_hi,
4129                        fstats->total_broadcast_packets_transmitted_lo,
4130                        qstats->total_broadcast_packets_transmitted_lo);
4131                 ADD_64(fstats->valid_bytes_received_hi,
4132                        qstats->valid_bytes_received_hi,
4133                        fstats->valid_bytes_received_lo,
4134                        qstats->valid_bytes_received_lo);
4135
4136                 ADD_64(estats->error_bytes_received_hi,
4137                        qstats->error_bytes_received_hi,
4138                        estats->error_bytes_received_lo,
4139                        qstats->error_bytes_received_lo);
4140                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4141                        qstats->etherstatsoverrsizepkts_hi,
4142                        estats->etherstatsoverrsizepkts_lo,
4143                        qstats->etherstatsoverrsizepkts_lo);
4144                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4145                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4146         }
4147
4148         ADD_64(fstats->total_bytes_received_hi,
4149                estats->rx_stat_ifhcinbadoctets_hi,
4150                fstats->total_bytes_received_lo,
4151                estats->rx_stat_ifhcinbadoctets_lo);
4152
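             /* Copy the counters that sit between the host_func_stats_start/_end
              * marker words into the head of the ethtool stats block, which
              * shares that counter layout (hence the "- 2*sizeof(u32)").
              */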
4153         memcpy(estats, &(fstats->total_bytes_received_hi),
4154                sizeof(struct host_func_stats) - 2*sizeof(u32));
4155
4156         ADD_64(estats->etherstatsoverrsizepkts_hi,
4157                estats->rx_stat_dot3statsframestoolong_hi,
4158                estats->etherstatsoverrsizepkts_lo,
4159                estats->rx_stat_dot3statsframestoolong_lo);
4160         ADD_64(estats->error_bytes_received_hi,
4161                estats->rx_stat_ifhcinbadoctets_hi,
4162                estats->error_bytes_received_lo,
4163                estats->rx_stat_ifhcinbadoctets_lo);
4164
4165         if (bp->port.pmf) {
4166                 estats->mac_filter_discard =
4167                                 le32_to_cpu(tport->mac_filter_discard);
4168                 estats->xxoverflow_discard =
4169                                 le32_to_cpu(tport->xxoverflow_discard);
4170                 estats->brb_truncate_discard =
4171                                 le32_to_cpu(tport->brb_truncate_discard);
4172                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4173         }
4174
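             /* Advance the end marker and copy it to the start marker; equal
              * markers tell the next reader of this block that the snapshot
              * is complete and consistent.
              */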
4175         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4176
4177         bp->stats_pending = 0;
4178
4179         return 0;
4180 }
4181
4182 static void bnx2x_net_stats_update(struct bnx2x *bp)
4183 {
4184         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4185         struct net_device_stats *nstats = &bp->dev->stats;
4186         int i;
4187
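             /* bnx2x_hilo() collapses a split {hi,lo} u32 counter pair (given a
              * pointer to the _hi word) into a single value - the full 64-bit
              * counter on 64-bit kernels.
              */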
4188         nstats->rx_packets =
4189                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4190                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4191                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4192
4193         nstats->tx_packets =
4194                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4195                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4196                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4197
4198         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4199
4200         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4201
4202         nstats->rx_dropped = estats->mac_discard;
4203         for_each_rx_queue(bp, i)
4204                 nstats->rx_dropped +=
4205                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4206
4207         nstats->tx_dropped = 0;
4208
4209         nstats->multicast =
4210                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4211
4212         nstats->collisions =
4213                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4214
4215         nstats->rx_length_errors =
4216                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4217                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4218         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4219                                  bnx2x_hilo(&estats->brb_truncate_hi);
4220         nstats->rx_crc_errors =
4221                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4222         nstats->rx_frame_errors =
4223                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4224         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4225         nstats->rx_missed_errors = estats->xxoverflow_discard;
4226
4227         nstats->rx_errors = nstats->rx_length_errors +
4228                             nstats->rx_over_errors +
4229                             nstats->rx_crc_errors +
4230                             nstats->rx_frame_errors +
4231                             nstats->rx_fifo_errors +
4232                             nstats->rx_missed_errors;
4233
4234         nstats->tx_aborted_errors =
4235                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4236                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4237         nstats->tx_carrier_errors =
4238                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4239         nstats->tx_fifo_errors = 0;
4240         nstats->tx_heartbeat_errors = 0;
4241         nstats->tx_window_errors = 0;
4242
4243         nstats->tx_errors = nstats->tx_aborted_errors +
4244                             nstats->tx_carrier_errors +
4245             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4246 }
4247
4248 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4249 {
4250         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4251         int i;
4252
4253         estats->driver_xoff = 0;
4254         estats->rx_err_discard_pkt = 0;
4255         estats->rx_skb_alloc_failed = 0;
4256         estats->hw_csum_err = 0;
4257         for_each_rx_queue(bp, i) {
4258                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4259
4260                 estats->driver_xoff += qstats->driver_xoff;
4261                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4262                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4263                 estats->hw_csum_err += qstats->hw_csum_err;
4264         }
4265 }
4266
4267 static void bnx2x_stats_update(struct bnx2x *bp)
4268 {
4269         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4270
4271         if (*stats_comp != DMAE_COMP_VAL)
4272                 return;
4273
4274         if (bp->port.pmf)
4275                 bnx2x_hw_stats_update(bp);
4276
4277         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4278                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4279                 bnx2x_panic();
4280                 return;
4281         }
4282
4283         bnx2x_net_stats_update(bp);
4284         bnx2x_drv_stats_update(bp);
4285
4286         if (bp->msglevel & NETIF_MSG_TIMER) {
4287                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4288                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4289                 struct tstorm_per_client_stats *old_tclient =
4290                                                         &bp->fp->old_tclient;
4291                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4292                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4293                 struct net_device_stats *nstats = &bp->dev->stats;
4294                 int i;
4295
4296                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4297                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4298                                   "  tx pkt (%lx)\n",
4299                        bnx2x_tx_avail(fp0_tx),
4300                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4301                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4302                                   "  rx pkt (%lx)\n",
4303                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4304                              fp0_rx->rx_comp_cons),
4305                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4306                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4307                                   "brb truncate %u\n",
4308                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4309                        qstats->driver_xoff,
4310                        estats->brb_drop_lo, estats->brb_truncate_lo);
4311                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4312                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4313                         "mac_discard %u  mac_filter_discard %u  "
4314                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4315                         "ttl0_discard %u\n",
4316                        le32_to_cpu(old_tclient->checksum_discard),
4317                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4318                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4319                        estats->mac_discard, estats->mac_filter_discard,
4320                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4321                        le32_to_cpu(old_tclient->ttl0_discard));
4322
4323                 for_each_queue(bp, i) {
4324                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4325                                bnx2x_fp(bp, i, tx_pkt),
4326                                bnx2x_fp(bp, i, rx_pkt),
4327                                bnx2x_fp(bp, i, rx_calls));
4328                 }
4329         }
4330
4331         bnx2x_hw_stats_post(bp);
4332         bnx2x_storm_stats_post(bp);
4333 }
4334
4335 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4336 {
4337         struct dmae_command *dmae;
4338         u32 opcode;
4339         int loader_idx = PMF_DMAE_C(bp);
4340         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4341
4342         bp->executer_idx = 0;
4343
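             /* Build a PCI-to-GRC DMAE opcode: on stats stop, the last
              * host-resident port/function statistics are DMAed back to their
              * shared-memory locations so a new PMF can pick up from them.
              */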
4344         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4345                   DMAE_CMD_C_ENABLE |
4346                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4347 #ifdef __BIG_ENDIAN
4348                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4349 #else
4350                   DMAE_CMD_ENDIANITY_DW_SWAP |
4351 #endif
4352                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4353                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4354
4355         if (bp->port.port_stx) {
4356
4357                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4358                 if (bp->func_stx)
4359                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4360                 else
4361                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4362                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4363                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4364                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4365                 dmae->dst_addr_hi = 0;
4366                 dmae->len = sizeof(struct host_port_stats) >> 2;
4367                 if (bp->func_stx) {
4368                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4369                         dmae->comp_addr_hi = 0;
4370                         dmae->comp_val = 1;
4371                 } else {
4372                         dmae->comp_addr_lo =
4373                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4374                         dmae->comp_addr_hi =
4375                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4376                         dmae->comp_val = DMAE_COMP_VAL;
4377
4378                         *stats_comp = 0;
4379                 }
4380         }
4381
4382         if (bp->func_stx) {
4383
4384                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4385                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4386                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4387                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4388                 dmae->dst_addr_lo = bp->func_stx >> 2;
4389                 dmae->dst_addr_hi = 0;
4390                 dmae->len = sizeof(struct host_func_stats) >> 2;
4391                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4392                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4393                 dmae->comp_val = DMAE_COMP_VAL;
4394
4395                 *stats_comp = 0;
4396         }
4397 }
4398
4399 static void bnx2x_stats_stop(struct bnx2x *bp)
4400 {
4401         int update = 0;
4402
4403         bnx2x_stats_comp(bp);
4404
4405         if (bp->port.pmf)
4406                 update = (bnx2x_hw_stats_update(bp) == 0);
4407
4408         update |= (bnx2x_storm_stats_update(bp) == 0);
4409
4410         if (update) {
4411                 bnx2x_net_stats_update(bp);
4412
4413                 if (bp->port.pmf)
4414                         bnx2x_port_stats_stop(bp);
4415
4416                 bnx2x_hw_stats_post(bp);
4417                 bnx2x_stats_comp(bp);
4418         }
4419 }
4420
4421 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4422 {
4423 }
4424
4425 static const struct {
4426         void (*action)(struct bnx2x *bp);
4427         enum bnx2x_stats_state next_state;
4428 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4429 /* state        event   */
4430 {
4431 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4432 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4433 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4434 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4435 },
4436 {
4437 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4438 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4439 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4440 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4441 }
4442 };
4443
4444 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4445 {
4446         enum bnx2x_stats_state state = bp->stats_state;
4447
4448         bnx2x_stats_stm[state][event].action(bp);
4449         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4450
4451         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4452                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4453                    state, event, bp->stats_state);
4454 }
4455
4456 static void bnx2x_timer(unsigned long data)
4457 {
4458         struct bnx2x *bp = (struct bnx2x *) data;
4459
4460         if (!netif_running(bp->dev))
4461                 return;
4462
4463         if (atomic_read(&bp->intr_sem) != 0)
4464                 goto timer_restart;
4465
4466         if (poll) {
4467                 struct bnx2x_fastpath *fp = &bp->fp[0];
4469
4470                         bnx2x_tx_int(fp);
4471                         bnx2x_rx_int(fp, 1000);
4472         }
4473
4474         if (!BP_NOMCP(bp)) {
4475                 int func = BP_FUNC(bp);
4476                 u32 drv_pulse;
4477                 u32 mcp_pulse;
4478
4479                 ++bp->fw_drv_pulse_wr_seq;
4480                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4481                 /* TBD - add SYSTEM_TIME */
4482                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4483                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4484
4485                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4486                              MCP_PULSE_SEQ_MASK);
4487                 /* The delta between driver pulse and mcp response
4488                  * should be 1 (before mcp response) or 0 (after mcp response)
4489                  */
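                     /* Illustrative wraparound case (sequence arithmetic is
                      * modulo MCP_PULSE_SEQ_MASK + 1): drv_pulse == 0 right
                      * after a wrap with mcp_pulse == MCP_PULSE_SEQ_MASK still
                      * satisfies drv_pulse == ((mcp_pulse + 1) &
                      * MCP_PULSE_SEQ_MASK), so no error is reported.
                      */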
4490                 if ((drv_pulse != mcp_pulse) &&
4491                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4492                         /* someone lost a heartbeat... */
4493                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4494                                   drv_pulse, mcp_pulse);
4495                 }
4496         }
4497
4498         if ((bp->state == BNX2X_STATE_OPEN) ||
4499             (bp->state == BNX2X_STATE_DISABLED))
4500                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4501
4502 timer_restart:
4503         mod_timer(&bp->timer, jiffies + bp->current_interval);
4504 }
4505
4506 /* end of Statistics */
4507
4508 /* nic init */
4509
4510 /*
4511  * nic init service functions
4512  */
4513
4514 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4515 {
4516         int port = BP_PORT(bp);
4517
4518         /* "CSTORM" */
4519         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4520                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4521                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4522         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4523                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4524                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4525 }
4526
4527 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4528                           dma_addr_t mapping, int sb_id)
4529 {
4530         int port = BP_PORT(bp);
4531         int func = BP_FUNC(bp);
4532         int index;
4533         u64 section;
4534
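             /* Each storm is handed the bus address of its sub-block within the
              * host status block so it can DMA index updates straight into host
              * memory; writing 1 to the HC_DISABLE entries below leaves every
              * index disabled until coalescing is configured.
              */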
4535         /* USTORM */
4536         section = ((u64)mapping) + offsetof(struct host_status_block,
4537                                             u_status_block);
4538         sb->u_status_block.status_block_id = sb_id;
4539
4540         REG_WR(bp, BAR_CSTRORM_INTMEM +
4541                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4542         REG_WR(bp, BAR_CSTRORM_INTMEM +
4543                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4544                U64_HI(section));
4545         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4546                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4547
4548         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4549                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4550                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4551
4552         /* CSTORM */
4553         section = ((u64)mapping) + offsetof(struct host_status_block,
4554                                             c_status_block);
4555         sb->c_status_block.status_block_id = sb_id;
4556
4557         REG_WR(bp, BAR_CSTRORM_INTMEM +
4558                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4559         REG_WR(bp, BAR_CSTRORM_INTMEM +
4560                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4561                U64_HI(section));
4562         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4563                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4564
4565         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4566                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4567                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4568
4569         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4570 }
4571
4572 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4573 {
4574         int func = BP_FUNC(bp);
4575
4576         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4577                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4578                         sizeof(struct tstorm_def_status_block)/4);
4579         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4580                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4581                         sizeof(struct cstorm_def_status_block_u)/4);
4582         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4583                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4584                         sizeof(struct cstorm_def_status_block_c)/4);
4585         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4586                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4587                         sizeof(struct xstorm_def_status_block)/4);
4588 }
4589
4590 static void bnx2x_init_def_sb(struct bnx2x *bp,
4591                               struct host_def_status_block *def_sb,
4592                               dma_addr_t mapping, int sb_id)
4593 {
4594         int port = BP_PORT(bp);
4595         int func = BP_FUNC(bp);
4596         int index, val, reg_offset;
4597         u64 section;
4598
4599         /* ATTN */
4600         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4601                                             atten_status_block);
4602         def_sb->atten_status_block.status_block_id = sb_id;
4603
4604         bp->attn_state = 0;
4605
4606         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4607                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4608
4609         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4610                 bp->attn_group[index].sig[0] = REG_RD(bp,
4611                                                      reg_offset + 0x10*index);
4612                 bp->attn_group[index].sig[1] = REG_RD(bp,
4613                                                reg_offset + 0x4 + 0x10*index);
4614                 bp->attn_group[index].sig[2] = REG_RD(bp,
4615                                                reg_offset + 0x8 + 0x10*index);
4616                 bp->attn_group[index].sig[3] = REG_RD(bp,
4617                                                reg_offset + 0xc + 0x10*index);
4618         }
4619
4620         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4621                              HC_REG_ATTN_MSG0_ADDR_L);
4622
4623         REG_WR(bp, reg_offset, U64_LO(section));
4624         REG_WR(bp, reg_offset + 4, U64_HI(section));
4625
4626         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4627
4628         val = REG_RD(bp, reg_offset);
4629         val |= sb_id;
4630         REG_WR(bp, reg_offset, val);
4631
4632         /* USTORM */
4633         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4634                                             u_def_status_block);
4635         def_sb->u_def_status_block.status_block_id = sb_id;
4636
4637         REG_WR(bp, BAR_CSTRORM_INTMEM +
4638                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4639         REG_WR(bp, BAR_CSTRORM_INTMEM +
4640                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4641                U64_HI(section));
4642         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4643                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4644
4645         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4646                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4647                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4648
4649         /* CSTORM */
4650         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4651                                             c_def_status_block);
4652         def_sb->c_def_status_block.status_block_id = sb_id;
4653
4654         REG_WR(bp, BAR_CSTRORM_INTMEM +
4655                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4656         REG_WR(bp, BAR_CSTRORM_INTMEM +
4657                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4658                U64_HI(section));
4659         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4660                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4661
4662         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4663                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4664                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4665
4666         /* TSTORM */
4667         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4668                                             t_def_status_block);
4669         def_sb->t_def_status_block.status_block_id = sb_id;
4670
4671         REG_WR(bp, BAR_TSTRORM_INTMEM +
4672                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4673         REG_WR(bp, BAR_TSTRORM_INTMEM +
4674                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4675                U64_HI(section));
4676         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4677                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4678
4679         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4680                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4681                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4682
4683         /* XSTORM */
4684         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4685                                             x_def_status_block);
4686         def_sb->x_def_status_block.status_block_id = sb_id;
4687
4688         REG_WR(bp, BAR_XSTRORM_INTMEM +
4689                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4690         REG_WR(bp, BAR_XSTRORM_INTMEM +
4691                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4692                U64_HI(section));
4693         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4694                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4695
4696         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4697                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4698                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4699
4700         bp->stats_pending = 0;
4701         bp->set_mac_pending = 0;
4702
4703         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4704 }
4705
4706 static void bnx2x_update_coalesce(struct bnx2x *bp)
4707 {
4708         int port = BP_PORT(bp);
4709         int i;
4710
4711         for_each_queue(bp, i) {
4712                 int sb_id = bp->fp[i].sb_id;
4713
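                     /* rx_ticks/tx_ticks are in microseconds while the HC
                      * timeout registers count in 12-usec hardware ticks (hence
                      * the division); a zero timeout is meaningless, so the
                      * companion HC_DISABLE write turns the index off instead.
                      */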
4714                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4715                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4716                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4717                                                       U_SB_ETH_RX_CQ_INDEX),
4718                         bp->rx_ticks/12);
4719                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4720                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4721                                                        U_SB_ETH_RX_CQ_INDEX),
4722                          (bp->rx_ticks/12) ? 0 : 1);
4723
4724                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4725                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4726                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4727                                                       C_SB_ETH_TX_CQ_INDEX),
4728                         bp->tx_ticks/12);
4729                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4730                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4731                                                        C_SB_ETH_TX_CQ_INDEX),
4732                          (bp->tx_ticks/12) ? 0 : 1);
4733         }
4734 }
4735
4736 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4737                                        struct bnx2x_fastpath *fp, int last)
4738 {
4739         int i;
4740
4741         for (i = 0; i < last; i++) {
4742                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4743                 struct sk_buff *skb = rx_buf->skb;
4744
4745                 if (skb == NULL) {
4746                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4747                         continue;
4748                 }
4749
4750                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4751                         pci_unmap_single(bp->pdev,
4752                                          pci_unmap_addr(rx_buf, mapping),
4753                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4754
4755                 dev_kfree_skb(skb);
4756                 rx_buf->skb = NULL;
4757         }
4758 }
4759
4760 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4761 {
4762         int func = BP_FUNC(bp);
4763         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4764                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4765         u16 ring_prod, cqe_ring_prod;
4766         int i, j;
4767
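             /* Note: ETH_OVREHEAD (sic, misspelled in bnx2x.h) is the per-frame
              * overhead added on top of the MTU before Rx-buffer alignment.
              */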
4768         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4769         DP(NETIF_MSG_IFUP,
4770            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4771
4772         if (bp->flags & TPA_ENABLE_FLAG) {
4773
4774                 for_each_rx_queue(bp, j) {
4775                         struct bnx2x_fastpath *fp = &bp->fp[j];
4776
4777                         for (i = 0; i < max_agg_queues; i++) {
4778                                 fp->tpa_pool[i].skb =
4779                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4780                                 if (!fp->tpa_pool[i].skb) {
4781                                         BNX2X_ERR("Failed to allocate TPA "
4782                                                   "skb pool for queue[%d] - "
4783                                                   "disabling TPA on this "
4784                                                   "queue!\n", j);
4785                                         bnx2x_free_tpa_pool(bp, fp, i);
4786                                         fp->disable_tpa = 1;
4787                                         break;
4788                                 }
4789                                 /* note: this queue's pool, not bp->fp[0]'s */
4790                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4791                                                    mapping, 0);
4792                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4793                         }
4794                 }
4795         }
4796
4797         for_each_rx_queue(bp, j) {
4798                 struct bnx2x_fastpath *fp = &bp->fp[j];
4799
4800                 fp->rx_bd_cons = 0;
4801                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4802                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4803
4804                 /* Mark queue as Rx */
4805                 fp->is_rx_queue = 1;
4806
4807                 /* "next page" elements initialization */
4808                 /* SGE ring */
4809                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4810                         struct eth_rx_sge *sge;
4811
4812                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4813                         sge->addr_hi =
4814                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4815                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4816                         sge->addr_lo =
4817                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4818                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4819                 }
4820
4821                 bnx2x_init_sge_ring_bit_mask(fp);
4822
4823                 /* RX BD ring */
4824                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4825                         struct eth_rx_bd *rx_bd;
4826
4827                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4828                         rx_bd->addr_hi =
4829                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4830                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4831                         rx_bd->addr_lo =
4832                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4833                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4834                 }
4835
4836                 /* CQ ring */
4837                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4838                         struct eth_rx_cqe_next_page *nextpg;
4839
4840                         nextpg = (struct eth_rx_cqe_next_page *)
4841                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4842                         nextpg->addr_hi =
4843                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4844                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4845                         nextpg->addr_lo =
4846                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4847                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4848                 }
4849
4850                 /* Allocate SGEs and initialize the ring elements */
4851                 for (i = 0, ring_prod = 0;
4852                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4853
4854                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4855                                 BNX2X_ERR("was only able to allocate "
4856                                           "%d rx sges\n", i);
4857                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4858                                 /* Cleanup already allocated elements */
4859                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4860                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4861                                 fp->disable_tpa = 1;
4862                                 ring_prod = 0;
4863                                 break;
4864                         }
4865                         ring_prod = NEXT_SGE_IDX(ring_prod);
4866                 }
4867                 fp->rx_sge_prod = ring_prod;
4868
4869                 /* Allocate BDs and initialize BD ring */
4870                 fp->rx_comp_cons = 0;
4871                 cqe_ring_prod = ring_prod = 0;
4872                 for (i = 0; i < bp->rx_ring_size; i++) {
4873                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4874                                 BNX2X_ERR("was only able to allocate "
4875                                           "%d rx skbs on queue[%d]\n", i, j);
4876                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4877                                 break;
4878                         }
4879                         ring_prod = NEXT_RX_IDX(ring_prod);
4880                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4881                         WARN_ON(ring_prod <= i);
4882                 }
4883
4884                 fp->rx_bd_prod = ring_prod;
4885                 /* must not have more available CQEs than BDs */
4886                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4887                                        cqe_ring_prod);
4888                 fp->rx_pkt = fp->rx_calls = 0;
4889
4890                 /* Warning!
4891                  * This will generate an interrupt (to the TSTORM), so it
4892                  * must only be done after the chip is initialized.
4893                  */
4894                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4895                                      fp->rx_sge_prod);
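                     /* The USTORM memory-workaround address is per function, so
                      * program it only once, from the leading (j == 0) queue's
                      * CQ mapping.
                      */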
4896                 if (j != 0)
4897                         continue;
4898
4899                 REG_WR(bp, BAR_USTRORM_INTMEM +
4900                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4901                        U64_LO(fp->rx_comp_mapping));
4902                 REG_WR(bp, BAR_USTRORM_INTMEM +
4903                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4904                        U64_HI(fp->rx_comp_mapping));
4905         }
4906 }
4907
4908 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4909 {
4910         int i, j;
4911
4912         for_each_tx_queue(bp, j) {
4913                 struct bnx2x_fastpath *fp = &bp->fp[j];
4914
4915                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4916                         struct eth_tx_next_bd *tx_next_bd =
4917                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4918
4919                         tx_next_bd->addr_hi =
4920                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4921                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4922                         tx_next_bd->addr_lo =
4923                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4924                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4925                 }
4926
4927                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4928                 fp->tx_db.data.zero_fill1 = 0;
4929                 fp->tx_db.data.prod = 0;
4930
4931                 fp->tx_pkt_prod = 0;
4932                 fp->tx_pkt_cons = 0;
4933                 fp->tx_bd_prod = 0;
4934                 fp->tx_bd_cons = 0;
4935                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4936                 fp->tx_pkt = 0;
4937         }
4938 }
4939
4940 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4941 {
4942         int func = BP_FUNC(bp);
4943
4944         spin_lock_init(&bp->spq_lock);
4945
4946         bp->spq_left = MAX_SPQ_PENDING;
4947         bp->spq_prod_idx = 0;
4948         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4949         bp->spq_prod_bd = bp->spq;
4950         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4951
4952         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4953                U64_LO(bp->spq_mapping));
4954         REG_WR(bp,
4955                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4956                U64_HI(bp->spq_mapping));
4957
4958         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4959                bp->spq_prod_idx);
4960 }
4961
4962 static void bnx2x_init_context(struct bnx2x *bp)
4963 {
4964         int i;
4965
4966         for_each_rx_queue(bp, i) {
4967                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4968                 struct bnx2x_fastpath *fp = &bp->fp[i];
4969                 u8 cl_id = fp->cl_id;
4970
4971                 context->ustorm_st_context.common.sb_index_numbers =
4972                                                 BNX2X_RX_SB_INDEX_NUM;
4973                 context->ustorm_st_context.common.clientId = cl_id;
4974                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4975                 context->ustorm_st_context.common.flags =
4976                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4977                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4978                 context->ustorm_st_context.common.statistics_counter_id =
4979                                                 cl_id;
4980                 context->ustorm_st_context.common.mc_alignment_log_size =
4981                                                 BNX2X_RX_ALIGN_SHIFT;
4982                 context->ustorm_st_context.common.bd_buff_size =
4983                                                 bp->rx_buf_size;
4984                 context->ustorm_st_context.common.bd_page_base_hi =
4985                                                 U64_HI(fp->rx_desc_mapping);
4986                 context->ustorm_st_context.common.bd_page_base_lo =
4987                                                 U64_LO(fp->rx_desc_mapping);
4988                 if (!fp->disable_tpa) {
4989                         context->ustorm_st_context.common.flags |=
4990                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4991                         context->ustorm_st_context.common.sge_buff_size =
4992                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4993                                          (u32)0xffff);
4994                         context->ustorm_st_context.common.sge_page_base_hi =
4995                                                 U64_HI(fp->rx_sge_mapping);
4996                         context->ustorm_st_context.common.sge_page_base_lo =
4997                                                 U64_LO(fp->rx_sge_mapping);
4998
4999                         context->ustorm_st_context.common.max_sges_for_packet =
5000                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5001                         context->ustorm_st_context.common.max_sges_for_packet =
5002                                 ((context->ustorm_st_context.common.
5003                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5004                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5005                 }
5006
5007                 context->ustorm_ag_context.cdu_usage =
5008                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5009                                                CDU_REGION_NUMBER_UCM_AG,
5010                                                ETH_CONNECTION_TYPE);
5011
5012                 context->xstorm_ag_context.cdu_reserved =
5013                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5014                                                CDU_REGION_NUMBER_XCM_AG,
5015                                                ETH_CONNECTION_TYPE);
5016         }
5017
5018         for_each_tx_queue(bp, i) {
5019                 struct bnx2x_fastpath *fp = &bp->fp[i];
5020                 struct eth_context *context =
5021                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5022
5023                 context->cstorm_st_context.sb_index_number =
5024                                                 C_SB_ETH_TX_CQ_INDEX;
5025                 context->cstorm_st_context.status_block_id = fp->sb_id;
5026
5027                 context->xstorm_st_context.tx_bd_page_base_hi =
5028                                                 U64_HI(fp->tx_desc_mapping);
5029                 context->xstorm_st_context.tx_bd_page_base_lo =
5030                                                 U64_LO(fp->tx_desc_mapping);
5031                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5032                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5033         }
5034 }
5035
5036 static void bnx2x_init_ind_table(struct bnx2x *bp)
5037 {
5038         int func = BP_FUNC(bp);
5039         int i;
5040
5041         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5042                 return;
5043
5044         DP(NETIF_MSG_IFUP,
5045            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
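             /* Entries are spread round-robin over the Rx clients: entry i maps
              * to client (leading cl_id + i % num_rx_queues).
              */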
5046         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5047                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5048                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5049                         bp->fp->cl_id + (i % bp->num_rx_queues));
5050 }
5051
5052 static void bnx2x_set_client_config(struct bnx2x *bp)
5053 {
5054         struct tstorm_eth_client_config tstorm_client = {0};
5055         int port = BP_PORT(bp);
5056         int i;
5057
5058         tstorm_client.mtu = bp->dev->mtu;
5059         tstorm_client.config_flags =
5060                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5061                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5062 #ifdef BCM_VLAN
5063         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5064                 tstorm_client.config_flags |=
5065                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5066                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5067         }
5068 #endif
5069
5070         for_each_queue(bp, i) {
5071                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5072
5073                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5074                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5075                        ((u32 *)&tstorm_client)[0]);
5076                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5077                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5078                        ((u32 *)&tstorm_client)[1]);
5079         }
5080
5081         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5082            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5083 }
5084
5085 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5086 {
5087         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5088         int mode = bp->rx_mode;
5089         int mask = (1 << BP_L_ID(bp));
5090         int func = BP_FUNC(bp);
5091         int port = BP_PORT(bp);
5092         int i;
5093         /* All packets except management unicast should also be passed to the host */
5094         u32 llh_mask =
5095                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5096                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5097                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5098                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5099
5100         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5101
5102         switch (mode) {
5103         case BNX2X_RX_MODE_NONE: /* no Rx */
5104                 tstorm_mac_filter.ucast_drop_all = mask;
5105                 tstorm_mac_filter.mcast_drop_all = mask;
5106                 tstorm_mac_filter.bcast_drop_all = mask;
5107                 break;
5108
5109         case BNX2X_RX_MODE_NORMAL:
5110                 tstorm_mac_filter.bcast_accept_all = mask;
5111                 break;
5112
5113         case BNX2X_RX_MODE_ALLMULTI:
5114                 tstorm_mac_filter.mcast_accept_all = mask;
5115                 tstorm_mac_filter.bcast_accept_all = mask;
5116                 break;
5117
5118         case BNX2X_RX_MODE_PROMISC:
5119                 tstorm_mac_filter.ucast_accept_all = mask;
5120                 tstorm_mac_filter.mcast_accept_all = mask;
5121                 tstorm_mac_filter.bcast_accept_all = mask;
5122                 /* pass management unicast packets as well */
5123                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5124                 break;
5125
5126         default:
5127                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5128                 break;
5129         }
5130
5131         REG_WR(bp,
5132                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5133                llh_mask);
5134
5135         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5136                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5137                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5138                        ((u32 *)&tstorm_mac_filter)[i]);
5139
5140 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5141                    ((u32 *)&tstorm_mac_filter)[i]); */
5142         }
5143
5144         if (mode != BNX2X_RX_MODE_NONE)
5145                 bnx2x_set_client_config(bp);
5146 }
5147
5148 static void bnx2x_init_internal_common(struct bnx2x *bp)
5149 {
5150         int i;
5151
5152         /* Zero this manually as its initialization is
5153            currently missing in the initTool */
5154         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5155                 REG_WR(bp, BAR_USTRORM_INTMEM +
5156                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5157 }
5158
5159 static void bnx2x_init_internal_port(struct bnx2x *bp)
5160 {
5161         int port = BP_PORT(bp);
5162
5163         REG_WR(bp,
5164                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5165         REG_WR(bp,
5166                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5167         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5168         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5169 }
5170
5171 static void bnx2x_init_internal_func(struct bnx2x *bp)
5172 {
5173         struct tstorm_eth_function_common_config tstorm_config = {0};
5174         struct stats_indication_flags stats_flags = {0};
5175         int port = BP_PORT(bp);
5176         int func = BP_FUNC(bp);
5177         int i, j;
5178         u32 offset;
5179         u16 max_agg_size;
5180
5181         if (is_multi(bp)) {
5182                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5183                 tstorm_config.rss_result_mask = MULTI_MASK;
5184         }
5185
5186         /* Enable TPA if needed */
5187         if (bp->flags & TPA_ENABLE_FLAG)
5188                 tstorm_config.config_flags |=
5189                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5190
5191         if (IS_E1HMF(bp))
5192                 tstorm_config.config_flags |=
5193                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5194
5195         tstorm_config.leading_client_id = BP_L_ID(bp);
5196
5197         REG_WR(bp, BAR_TSTRORM_INTMEM +
5198                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5199                (*(u32 *)&tstorm_config));
5200
5201         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5202         bnx2x_set_storm_rx_mode(bp);
5203
5204         for_each_queue(bp, i) {
5205                 u8 cl_id = bp->fp[i].cl_id;
5206
5207                 /* reset xstorm per client statistics */
5208                 offset = BAR_XSTRORM_INTMEM +
5209                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5210                 for (j = 0;
5211                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5212                         REG_WR(bp, offset + j*4, 0);
5213
5214                 /* reset tstorm per client statistics */
5215                 offset = BAR_TSTRORM_INTMEM +
5216                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5217                 for (j = 0;
5218                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5219                         REG_WR(bp, offset + j*4, 0);
5220
5221                 /* reset ustorm per client statistics */
5222                 offset = BAR_USTRORM_INTMEM +
5223                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5224                 for (j = 0;
5225                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5226                         REG_WR(bp, offset + j*4, 0);
5227         }
5228
5229         /* Init statistics related context */
5230         stats_flags.collect_eth = 1;
5231
5232         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5233                ((u32 *)&stats_flags)[0]);
5234         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5235                ((u32 *)&stats_flags)[1]);
5236
5237         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5238                ((u32 *)&stats_flags)[0]);
5239         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5240                ((u32 *)&stats_flags)[1]);
5241
5242         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5243                ((u32 *)&stats_flags)[0]);
5244         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5245                ((u32 *)&stats_flags)[1]);
5246
5247         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5248                ((u32 *)&stats_flags)[0]);
5249         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5250                ((u32 *)&stats_flags)[1]);
5251
5252         REG_WR(bp, BAR_XSTRORM_INTMEM +
5253                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5254                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5255         REG_WR(bp, BAR_XSTRORM_INTMEM +
5256                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5257                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5258
5259         REG_WR(bp, BAR_TSTRORM_INTMEM +
5260                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5261                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5262         REG_WR(bp, BAR_TSTRORM_INTMEM +
5263                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5264                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5265
5266         REG_WR(bp, BAR_USTRORM_INTMEM +
5267                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5268                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5269         REG_WR(bp, BAR_USTRORM_INTMEM +
5270                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5271                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5272
5273         if (CHIP_IS_E1H(bp)) {
5274                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5275                         IS_E1HMF(bp));
5276                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5277                         IS_E1HMF(bp));
5278                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5279                         IS_E1HMF(bp));
5280                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5281                         IS_E1HMF(bp));
5282
5283                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5284                          bp->e1hov);
5285         }
5286
5287         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
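        /* an aggregation may span at most min(8, MAX_SKB_FRAGS) SGE pages
           of SGE_PAGE_SIZE * PAGES_PER_SGE bytes each; the result is clamped
           to 0xffff since the FW keeps it in a 16-bit field (see the
           REG_WR16 below) */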
5288         max_agg_size =
5289                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5290                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5291                     (u32)0xffff);
5292         for_each_rx_queue(bp, i) {
5293                 struct bnx2x_fastpath *fp = &bp->fp[i];
5294
5295                 REG_WR(bp, BAR_USTRORM_INTMEM +
5296                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5297                        U64_LO(fp->rx_comp_mapping));
5298                 REG_WR(bp, BAR_USTRORM_INTMEM +
5299                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5300                        U64_HI(fp->rx_comp_mapping));
5301
5302                 /* Next page */
5303                 REG_WR(bp, BAR_USTRORM_INTMEM +
5304                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5305                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5306                 REG_WR(bp, BAR_USTRORM_INTMEM +
5307                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5308                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5309
5310                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5311                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5312                          max_agg_size);
5313         }
5314
5315         /* dropless flow control */
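        /* the low/high values below are ring-occupancy thresholds (in ring
           entries) the FW uses to generate and release pause frames; the SGE
           thresholds are left at 0 (unused) unless TPA is active on the
           queue */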
5316         if (CHIP_IS_E1H(bp)) {
5317                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5318
5319                 rx_pause.bd_thr_low = 250;
5320                 rx_pause.cqe_thr_low = 250;
5321                 rx_pause.cos = 1;
5322                 rx_pause.sge_thr_low = 0;
5323                 rx_pause.bd_thr_high = 350;
5324                 rx_pause.cqe_thr_high = 350;
5325                 rx_pause.sge_thr_high = 0;
5326
5327                 for_each_rx_queue(bp, i) {
5328                         struct bnx2x_fastpath *fp = &bp->fp[i];
5329
5330                         if (!fp->disable_tpa) {
5331                                 rx_pause.sge_thr_low = 150;
5332                                 rx_pause.sge_thr_high = 250;
5333                         }
5334
5336                         offset = BAR_USTRORM_INTMEM +
5337                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5338                                                                    fp->cl_id);
5339                         for (j = 0;
5340                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5341                              j++)
5342                                 REG_WR(bp, offset + j*4,
5343                                        ((u32 *)&rx_pause)[j]);
5344                 }
5345         }
5346
5347         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5348
5349         /* Init rate shaping and fairness contexts */
5350         if (IS_E1HMF(bp)) {
5351                 int vn;
5352
5353                 /* During init there is no active link;
5354                    until link is up, assume a 10Gbps line rate */
5355                 bp->link_vars.line_speed = SPEED_10000;
5356                 bnx2x_init_port_minmax(bp);
5357
5358                 bnx2x_calc_vn_weight_sum(bp);
5359
5360                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5361                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5362
5363                 /* Enable rate shaping and fairness */
5364                 bp->cmng.flags.cmng_enables =
5365                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5366                 if (bp->vn_weight_sum)
5367                         bp->cmng.flags.cmng_enables |=
5368                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5369                 else
5370                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5371                            "  fairness will be disabled\n");
5372         } else {
5373                 /* rate shaping and fairness are disabled */
5374                 DP(NETIF_MSG_IFUP,
5375                    "single function mode  minmax will be disabled\n");
5376         }
5377
5379         /* Store it to internal memory */
5380         if (bp->port.pmf)
5381                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5382                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5383                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5384                                ((u32 *)(&bp->cmng))[i]);
5385 }
5386
5387 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5388 {
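        /* the load codes are hierarchical: a COMMON load also performs the
         * PORT and FUNCTION init stages, and a PORT load also performs the
         * FUNCTION stage - hence the deliberate fall-through below */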
5389         switch (load_code) {
5390         case FW_MSG_CODE_DRV_LOAD_COMMON:
5391                 bnx2x_init_internal_common(bp);
5392                 /* no break */
5393
5394         case FW_MSG_CODE_DRV_LOAD_PORT:
5395                 bnx2x_init_internal_port(bp);
5396                 /* no break */
5397
5398         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5399                 bnx2x_init_internal_func(bp);
5400                 break;
5401
5402         default:
5403                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5404                 break;
5405         }
5406 }
5407
5408 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5409 {
5410         int i;
5411
5412         for_each_queue(bp, i) {
5413                 struct bnx2x_fastpath *fp = &bp->fp[i];
5414
5415                 fp->bp = bp;
5416                 fp->state = BNX2X_FP_STATE_CLOSED;
5417                 fp->index = i;
5418                 fp->cl_id = BP_L_ID(bp) + i;
5419                 fp->sb_id = fp->cl_id;
5420                 /* the Rx and Tx queues with the same index are served by the same client */
5421                 if (i >= bp->num_rx_queues)
5422                         fp->cl_id -= bp->num_rx_queues;
5423                 DP(NETIF_MSG_IFUP,
5424                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5425                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5426                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5427                               fp->sb_id);
5428                 bnx2x_update_fpsb_idx(fp);
5429         }
5430
5431         /* ensure status block indices were read */
5432         rmb();
5433
5435         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5436                           DEF_SB_ID);
5437         bnx2x_update_dsb_idx(bp);
5438         bnx2x_update_coalesce(bp);
5439         bnx2x_init_rx_rings(bp);
5440         bnx2x_init_tx_ring(bp);
5441         bnx2x_init_sp_ring(bp);
5442         bnx2x_init_context(bp);
5443         bnx2x_init_internal(bp, load_code);
5444         bnx2x_init_ind_table(bp);
5445         bnx2x_stats_init(bp);
5446
5447         /* At this point, we are ready for interrupts */
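        /* the interrupt handlers test intr_sem and discard interrupts while
           it is non-zero, so clearing it is what actually arms them */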
5448         atomic_set(&bp->intr_sem, 0);
5449
5450         /* flush all before enabling interrupts */
5451         mb();
5452         mmiowb();
5453
5454         bnx2x_int_enable(bp);
5455
5456         /* Check for SPIO5 */
5457         bnx2x_attn_int_deasserted0(bp,
5458                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5459                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5460 }
5461
5462 /* end of nic init */
5463
5464 /*
5465  * gzip service functions
5466  */
5467
5468 static int bnx2x_gunzip_init(struct bnx2x *bp)
5469 {
5470         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5471                                               &bp->gunzip_mapping);
5472         if (bp->gunzip_buf == NULL)
5473                 goto gunzip_nomem1;
5474
5475         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5476         if (bp->strm == NULL)
5477                 goto gunzip_nomem2;
5478
5479         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5480                                       GFP_KERNEL);
5481         if (bp->strm->workspace == NULL)
5482                 goto gunzip_nomem3;
5483
5484         return 0;
5485
5486 gunzip_nomem3:
5487         kfree(bp->strm);
5488         bp->strm = NULL;
5489
5490 gunzip_nomem2:
5491         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5492                             bp->gunzip_mapping);
5493         bp->gunzip_buf = NULL;
5494
5495 gunzip_nomem1:
5496         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5497                " decompression\n", bp->dev->name);
5498         return -ENOMEM;
5499 }
5500
5501 static void bnx2x_gunzip_end(struct bnx2x *bp)
5502 {
5503         kfree(bp->strm->workspace);
5504
5505         kfree(bp->strm);
5506         bp->strm = NULL;
5507
5508         if (bp->gunzip_buf) {
5509                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5510                                     bp->gunzip_mapping);
5511                 bp->gunzip_buf = NULL;
5512         }
5513 }
5514
5515 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5516 {
5517         int n, rc;
5518
5519         /* check gzip header */
5520         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5521                 BNX2X_ERR("Bad gzip header\n");
5522                 return -EINVAL;
5523         }
5524
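        /* the fixed gzip header is 10 bytes (RFC 1952); if the FNAME flag
           is set, a null-terminated original file name follows and has to be
           skipped before the raw deflate stream starts */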
5525         n = 10;
5526
5527 #define FNAME                           0x8
5528
5529         if (zbuf[3] & FNAME)
5530                 while ((zbuf[n++] != 0) && (n < len));
5531
5532         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5533         bp->strm->avail_in = len - n;
5534         bp->strm->next_out = bp->gunzip_buf;
5535         bp->strm->avail_out = FW_BUF_SIZE;
5536
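        /* negative windowBits asks zlib for a raw deflate stream - the gzip
           header was already consumed above */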
5537         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5538         if (rc != Z_OK)
5539                 return rc;
5540
5541         rc = zlib_inflate(bp->strm, Z_FINISH);
5542         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5543                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5544                        bp->dev->name, bp->strm->msg);
5545
5546         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5547         if (bp->gunzip_outlen & 0x3)
5548                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5549                                     " gunzip_outlen (%d) not aligned\n",
5550                        bp->dev->name, bp->gunzip_outlen);
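        /* callers consume the buffer as 32-bit words, so convert the byte
           count (alignment was verified just above) */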
5551         bp->gunzip_outlen >>= 2;
5552
5553         zlib_inflateEnd(bp->strm);
5554
5555         if (rc == Z_STREAM_END)
5556                 return 0;
5557
5558         return rc;
5559 }
5560
5561 /* nic load/unload */
5562
5563 /*
5564  * General service functions
5565  */
5566
5567 /* send a NIG loopback debug packet */
5568 static void bnx2x_lb_pckt(struct bnx2x *bp)
5569 {
5570         u32 wb_write[3];
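        /* each DMAE write below pushes 8 bytes of packet data plus a
           control word (0x20 = SOP, 0x10 = EOP), so the two writes form one
           16-byte (0x10) packet - the size the NIG byte counter is checked
           against later */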
5571
5572         /* Ethernet source and destination addresses */
5573         wb_write[0] = 0x55555555;
5574         wb_write[1] = 0x55555555;
5575         wb_write[2] = 0x20;             /* SOP */
5576         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5577
5578         /* NON-IP protocol */
5579         wb_write[0] = 0x09000000;
5580         wb_write[1] = 0x55555555;
5581         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5582         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5583 }
5584
5585 /* some of the internal memories
5586  * are not directly readable from the driver,
5587  * so to test them we send debug packets
5588  */
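/* The test zeroes the parser's CFC-search credits so that injected loopback
 * packets stall in front of the parser, then verifies the NIG byte counter
 * and the PRS packet counter at every step.
 */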
5589 static int bnx2x_int_mem_test(struct bnx2x *bp)
5590 {
5591         int factor;
5592         int count, i;
5593         u32 val = 0;
5594
5595         if (CHIP_REV_IS_FPGA(bp))
5596                 factor = 120;
5597         else if (CHIP_REV_IS_EMUL(bp))
5598                 factor = 200;
5599         else
5600                 factor = 1;
5601
5602         DP(NETIF_MSG_HW, "start part1\n");
5603
5604         /* Disable inputs of parser neighbor blocks */
5605         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5606         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5607         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5608         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5609
5610         /*  Write 0 to parser credits for CFC search request */
5611         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5612
5613         /* send Ethernet packet */
5614         bnx2x_lb_pckt(bp);
5615
5616         /* TODO: do we need to reset the NIG statistics? */
5617         /* Wait until NIG register shows 1 packet of size 0x10 */
5618         count = 1000 * factor;
5619         while (count) {
5620
5621                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5622                 val = *bnx2x_sp(bp, wb_data[0]);
5623                 if (val == 0x10)
5624                         break;
5625
5626                 msleep(10);
5627                 count--;
5628         }
5629         if (val != 0x10) {
5630                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5631                 return -1;
5632         }
5633
5634         /* Wait until PRS register shows 1 packet */
5635         count = 1000 * factor;
5636         while (count) {
5637                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5638                 if (val == 1)
5639                         break;
5640
5641                 msleep(10);
5642                 count--;
5643         }
5644         if (val != 0x1) {
5645                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5646                 return -2;
5647         }
5648
5649         /* Reset and init BRB, PRS */
5650         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5651         msleep(50);
5652         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5653         msleep(50);
5654         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5655         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5656
5657         DP(NETIF_MSG_HW, "part2\n");
5658
5659         /* Disable inputs of parser neighbor blocks */
5660         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5661         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5662         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5663         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5664
5665         /* Write 0 to parser credits for CFC search request */
5666         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5667
5668         /* send 10 Ethernet packets */
5669         for (i = 0; i < 10; i++)
5670                 bnx2x_lb_pckt(bp);
5671
5672         /* Wait until NIG register shows 10 + 1 packets,
5673            i.e. a total size of 11*0x10 = 0xb0 */
5674         count = 1000 * factor;
5675         while (count) {
5676
5677                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5678                 val = *bnx2x_sp(bp, wb_data[0]);
5679                 if (val == 0xb0)
5680                         break;
5681
5682                 msleep(10);
5683                 count--;
5684         }
5685         if (val != 0xb0) {
5686                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5687                 return -3;
5688         }
5689
5690         /* Wait until PRS register shows 2 packets */
5691         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5692         if (val != 2)
5693                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5694
5695         /* Write 1 to parser credits for CFC search request */
5696         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5697
5698         /* Wait until PRS register shows 3 packets */
5699         msleep(10 * factor);
5701         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5702         if (val != 3)
5703                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5704
5705         /* clear NIG EOP FIFO */
5706         for (i = 0; i < 11; i++)
5707                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5708         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5709         if (val != 1) {
5710                 BNX2X_ERR("clear of NIG failed\n");
5711                 return -4;
5712         }
5713
5714         /* Reset and init BRB, PRS, NIG */
5715         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5716         msleep(50);
5717         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5718         msleep(50);
5719         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5720         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5721 #ifndef BCM_ISCSI
5722         /* set NIC mode */
5723         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5724 #endif
5725
5726         /* Enable inputs of parser neighbor blocks */
5727         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5728         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5729         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5730         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5731
5732         DP(NETIF_MSG_HW, "done\n");
5733
5734         return 0; /* OK */
5735 }
5736
5737 static void enable_blocks_attention(struct bnx2x *bp)
5738 {
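        /* writing 0 to a block's INT_MASK register unmasks all of its
           attention sources; set bits stay masked (see the PBF write at the
           end) */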
5739         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5740         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5741         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5742         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5743         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5744         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5745         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5746         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5747         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5748 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5749 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5750         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5751         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5752         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5753 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5754 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5755         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5756         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5757         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5758         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5759 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5760 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5761         if (CHIP_REV_IS_FPGA(bp))
5762                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5763         else
5764                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5765         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5766         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5767         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5768 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5769 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5770         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5771         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5772 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5773         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5774 }
5775
5776
5777 static void bnx2x_reset_common(struct bnx2x *bp)
5778 {
5779         /* reset_common */
5780         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5781                0xd3ffff7f);
5782         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5783 }
5784
5785
5786 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5787 {
5788         u32 val;
5789         u8 port;
5790         u8 is_required = 0;
5791
5792         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5793               SHARED_HW_CFG_FAN_FAILURE_MASK;
5794
5795         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5796                 is_required = 1;
5797
5798         /*
5799          * The fan failure mechanism is usually related to the PHY type since
5800          * the power consumption of the board is affected by the PHY. Currently,
5801          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5802          */
5803         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5804                 for (port = PORT_0; port < PORT_MAX; port++) {
5805                         u32 phy_type =
5806                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5807                                          external_phy_config) &
5808                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5809                         is_required |=
5810                                 ((phy_type ==
5811                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5812                                  (phy_type ==
5813                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5814                                  (phy_type ==
5815                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5816                 }
5817
5818         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5819
5820         if (is_required == 0)
5821                 return;
5822
5823         /* Fan failure is indicated by SPIO 5 */
5824         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5825                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5826
5827         /* set to active low mode */
5828         val = REG_RD(bp, MISC_REG_SPIO_INT);
5829         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5830                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5831         REG_WR(bp, MISC_REG_SPIO_INT, val);
5832
5833         /* enable interrupt to signal the IGU */
5834         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5835         val |= (1 << MISC_REGISTERS_SPIO_5);
5836         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5837 }
5838
5839 static int bnx2x_init_common(struct bnx2x *bp)
5840 {
5841         u32 val, i;
5842
5843         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5844
5845         bnx2x_reset_common(bp);
5846         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5847         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5848
5849         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5850         if (CHIP_IS_E1H(bp))
5851                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5852
5853         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5854         msleep(30);
5855         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5856
5857         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5858         if (CHIP_IS_E1(bp)) {
5859                 /* enable HW interrupt from PXP on USDM overflow
5860                    bit 16 on INT_MASK_0 */
5861                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5862         }
5863
5864         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5865         bnx2x_init_pxp(bp);
5866
5867 #ifdef __BIG_ENDIAN
5868         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5869         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5870         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5871         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5872         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5873         /* make sure this value is 0 */
5874         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5875
5876 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5877         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5878         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5879         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5880         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5881 #endif
5882
5883         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5884 #ifdef BCM_ISCSI
5885         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5886         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5887         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5888 #endif
5889
5890         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5891                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5892
5893         /* let the HW do its magic ... */
5894         msleep(100);
5895         /* finish PXP init */
5896         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5897         if (val != 1) {
5898                 BNX2X_ERR("PXP2 CFG failed\n");
5899                 return -EBUSY;
5900         }
5901         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5902         if (val != 1) {
5903                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5904                 return -EBUSY;
5905         }
5906
5907         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5908         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5909
5910         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5911
5912         /* clean the DMAE memory */
5913         bp->dmae_ready = 1;
5914         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5915
5916         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5917         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5918         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5919         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5920
5921         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5922         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5923         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5924         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5925
5926         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5927         /* soft reset pulse */
5928         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5929         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5930
5931 #ifdef BCM_ISCSI
5932         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5933 #endif
5934
5935         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5936         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5937         if (!CHIP_REV_IS_SLOW(bp)) {
5938                 /* enable hw interrupt from doorbell Q */
5939                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5940         }
5941
5942         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5943         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5944         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5945         /* set NIC mode */
5946         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5947         if (CHIP_IS_E1H(bp))
5948                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5949
5950         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5951         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5952         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5953         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5954
5955         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5956         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5957         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5958         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5959
5960         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5961         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5962         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5963         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5964
5965         /* sync semi rtc */
5966         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5967                0x80000000);
5968         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5969                0x80000000);
5970
5971         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5972         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5973         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5974
5975         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5976         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5977                 REG_WR(bp, i, 0xc0cac01a);
5978                 /* TODO: replace with something meaningful */
5979         }
5980         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5981         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5982
5983         if (sizeof(union cdu_context) != 1024)
5984                 /* we currently assume that a context is 1024 bytes */
5985                 printk(KERN_ALERT PFX "please adjust the size of"
5986                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5987
5988         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5989         val = (4 << 24) + (0 << 12) + 1024;
5990         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5991
5992         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5993         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5994         /* enable context validation interrupt from CFC */
5995         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5996
5997         /* set the thresholds to prevent CFC/CDU race */
5998         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5999
6000         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6001         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6002
6003         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6004         /* Reset PCIE errors for debug */
6005         REG_WR(bp, 0x2814, 0xffffffff);
6006         REG_WR(bp, 0x3820, 0xffffffff);
6007
6008         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6009         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6010         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6011         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6012
6013         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6014         if (CHIP_IS_E1H(bp)) {
6015                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6016                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6017         }
6018
6019         if (CHIP_REV_IS_SLOW(bp))
6020                 msleep(200);
6021
6022         /* finish CFC init */
6023         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6024         if (val != 1) {
6025                 BNX2X_ERR("CFC LL_INIT failed\n");
6026                 return -EBUSY;
6027         }
6028         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6029         if (val != 1) {
6030                 BNX2X_ERR("CFC AC_INIT failed\n");
6031                 return -EBUSY;
6032         }
6033         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6034         if (val != 1) {
6035                 BNX2X_ERR("CFC CAM_INIT failed\n");
6036                 return -EBUSY;
6037         }
6038         REG_WR(bp, CFC_REG_DEBUG0, 0);
6039
6040         /* read NIG statistic
6041            to see if this is our first bring-up since power-up */
6042         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6043         val = *bnx2x_sp(bp, wb_data[0]);
6044
6045         /* do internal memory self test */
6046         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6047                 BNX2X_ERR("internal mem self test failed\n");
6048                 return -EBUSY;
6049         }
6050
6051         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6052         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6053         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6054         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6055         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6056                 bp->port.need_hw_lock = 1;
6057                 break;
6058
6059         default:
6060                 break;
6061         }
6062
6063         bnx2x_setup_fan_failure_detection(bp);
6064
6065         /* clear PXP2 attentions */
6066         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6067
6068         enable_blocks_attention(bp);
6069
6070         if (!BP_NOMCP(bp)) {
6071                 bnx2x_acquire_phy_lock(bp);
6072                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6073                 bnx2x_release_phy_lock(bp);
6074         } else
6075                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6076
6077         return 0;
6078 }
6079
6080 static int bnx2x_init_port(struct bnx2x *bp)
6081 {
6082         int port = BP_PORT(bp);
6083         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6084         u32 low, high;
6085         u32 val;
6086
6087         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6088
6089         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6090
6091         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6092         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6093
6094         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6095         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6096         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6097 #ifdef BCM_ISCSI
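        /* this conditionally-compiled block references locals (i, wb_write,
           func) that are not declared in this function; it only builds when
           BCM_ISCSI is defined */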
6098         /* Port0  1
6099          * Port1  385 */
6100         i++;
6101         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6102         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6103         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6104         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6105
6106         /* Port0  2
6107          * Port1  386 */
6108         i++;
6109         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6110         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6111         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6112         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6113
6114         /* Port0  3
6115          * Port1  387 */
6116         i++;
6117         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6118         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6119         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6120         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6121 #endif
6122         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6123
6124 #ifdef BCM_ISCSI
6125         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6126         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6127
6128         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6129 #endif
6130         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6131
6132         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6133         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6134                 /* no pause for emulation and FPGA */
6135                 low = 0;
6136                 high = 513;
6137         } else {
6138                 if (IS_E1HMF(bp))
6139                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6140                 else if (bp->dev->mtu > 4096) {
6141                         if (bp->flags & ONE_PORT_FLAG)
6142                                 low = 160;
6143                         else {
6144                                 val = bp->dev->mtu;
6145                                 /* (24*1024 + val*4)/256 */
6146                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6147                         }
6148                 } else
6149                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6150                 high = low + 56;        /* 14*1024/256 */
6151         }
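        /* both thresholds are expressed in 256-byte BRB blocks, hence the
           /256 in the comments above */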
6152         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6153         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6154
6156         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6157
6158         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6159         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6160         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6161         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6162
6163         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6164         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6165         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6166         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6167
6168         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6169         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6170
6171         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6172
6173         /* configure PBF to work without PAUSE (mtu 9000) */
6174         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6175
6176         /* update threshold */
6177         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6178         /* update init credit */
6179         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6180
6181         /* probe changes */
6182         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6183         msleep(5);
6184         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6185
6186 #ifdef BCM_ISCSI
6187         /* tell the searcher where the T2 table is */
6188         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6189
6190         wb_write[0] = U64_LO(bp->t2_mapping);
6191         wb_write[1] = U64_HI(bp->t2_mapping);
6192         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6193         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6194         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6195         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6196
6197         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6198 #endif
6199         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6200         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6201
6202         if (CHIP_IS_E1(bp)) {
6203                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6204                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6205         }
6206         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6207
6208         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6209         /* init aeu_mask_attn_func_0/1:
6210          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6211          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6212          *             bits 4-7 are used for "per vn group attention" */
6213         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6214                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6215
6216         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6217         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6218         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6219         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6220         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6221
6222         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6223
6224         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6225
6226         if (CHIP_IS_E1H(bp)) {
6227                 /* 0x2 disable e1hov, 0x1 enable */
6228                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6229                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6230
6231                 /* support pause requests from USDM, TSDM and BRB */
6232                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6233
6234                 {
6235                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6236                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6237                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6238                 }
6239         }
6240
6241         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6242         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6243
6244         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6245         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6246                 {
6247                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6248
6249                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6250                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6251
6252                 /* The GPIO should be swapped if the swap register is
6253                    set and active */
6254                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6255                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6256
6257                 /* Select function upon port-swap configuration */
6258                 if (port == 0) {
6259                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6260                         aeu_gpio_mask = (swap_val && swap_override) ?
6261                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6262                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6263                 } else {
6264                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6265                         aeu_gpio_mask = (swap_val && swap_override) ?
6266                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6267                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6268                 }
6269                 val = REG_RD(bp, offset);
6270                 /* add GPIO3 to group */
6271                 val |= aeu_gpio_mask;
6272                 REG_WR(bp, offset, val);
6273                 }
6274                 break;
6275
6276         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6277         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6278                 /* add SPIO 5 to group 0 */
6279                 {
6280                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6281                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6282                 val = REG_RD(bp, reg_addr);
6283                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6284                 REG_WR(bp, reg_addr, val);
6285                 }
6286                 break;
6287
6288         default:
6289                 break;
6290         }
6291
6292         bnx2x__link_reset(bp);
6293
6294         return 0;
6295 }
6296
6297 #define ILT_PER_FUNC            (768/2)
6298 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6299 /* the phys address is shifted right 12 bits and a 1=valid bit is
6300    added at the 53rd bit;
6301    then, since this is a wide register(TM),
6302    we split it into two 32-bit writes
6303  */
6304 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6305 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
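/* e.g. for a DMA address x:
 *   ONCHIP_ADDR1(x) carries bits 12..43 of x,
 *   ONCHIP_ADDR2(x) carries bits 44..63 of x plus the valid bit (1 << 20)
 */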
6306 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6307 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6308
6309 #define CNIC_ILT_LINES          0
6310
6311 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6312 {
6313         int reg;
6314
6315         if (CHIP_IS_E1H(bp))
6316                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6317         else /* E1 */
6318                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6319
6320         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6321 }
6322
6323 static int bnx2x_init_func(struct bnx2x *bp)
6324 {
6325         int port = BP_PORT(bp);
6326         int func = BP_FUNC(bp);
6327         u32 addr, val;
6328         int i;
6329
6330         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6331
6332         /* set MSI reconfigure capability */
6333         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6334         val = REG_RD(bp, addr);
6335         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6336         REG_WR(bp, addr, val);
6337
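        /* each function owns ILT_PER_FUNC consecutive ILT lines; the first
           line maps this function's CDU context page */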
6338         i = FUNC_ILT_BASE(func);
6339
6340         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6341         if (CHIP_IS_E1H(bp)) {
6342                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6343                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6344         } else /* E1 */
6345                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6346                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6347
6349         if (CHIP_IS_E1H(bp)) {
6350                 for (i = 0; i < 9; i++)
6351                         bnx2x_init_block(bp,
6352                                          cm_blocks[i], FUNC0_STAGE + func);
6353
6354                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6355                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6356         }
6357
6358         /* HC init per function */
6359         if (CHIP_IS_E1H(bp)) {
6360                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6361
6362                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6363                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6364         }
6365         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6366
6367         /* Reset PCIE errors for debug */
6368         REG_WR(bp, 0x2114, 0xffffffff);
6369         REG_WR(bp, 0x2120, 0xffffffff);
6370
6371         return 0;
6372 }
6373
6374 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6375 {
6376         int i, rc = 0;
6377
6378         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6379            BP_FUNC(bp), load_code);
6380
6381         bp->dmae_ready = 0;
6382         mutex_init(&bp->dmae_mutex);
6383         bnx2x_gunzip_init(bp);
6384
6385         switch (load_code) {
6386         case FW_MSG_CODE_DRV_LOAD_COMMON:
6387                 rc = bnx2x_init_common(bp);
6388                 if (rc)
6389                         goto init_hw_err;
6390                 /* no break */
6391
6392         case FW_MSG_CODE_DRV_LOAD_PORT:
6393                 bp->dmae_ready = 1;
6394                 rc = bnx2x_init_port(bp);
6395                 if (rc)
6396                         goto init_hw_err;
6397                 /* no break */
6398
6399         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6400                 bp->dmae_ready = 1;
6401                 rc = bnx2x_init_func(bp);
6402                 if (rc)
6403                         goto init_hw_err;
6404                 break;
6405
6406         default:
6407                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6408                 break;
6409         }
6410
6411         if (!BP_NOMCP(bp)) {
6412                 int func = BP_FUNC(bp);
6413
6414                 bp->fw_drv_pulse_wr_seq =
6415                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6416                                  DRV_PULSE_SEQ_MASK);
6417                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6418                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6419                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6420         } else
6421                 bp->func_stx = 0;
6422
6423         /* this needs to be done before gunzip end */
6424         bnx2x_zero_def_sb(bp);
6425         for_each_queue(bp, i)
6426                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6427
6428 init_hw_err:
6429         bnx2x_gunzip_end(bp);
6430
6431         return rc;
6432 }
6433
6434 static void bnx2x_free_mem(struct bnx2x *bp)
6435 {
6436
6437 #define BNX2X_PCI_FREE(x, y, size) \
6438         do { \
6439                 if (x) { \
6440                         pci_free_consistent(bp->pdev, size, x, y); \
6441                         x = NULL; \
6442                         y = 0; \
6443                 } \
6444         } while (0)
6445
6446 #define BNX2X_FREE(x) \
6447         do { \
6448                 if (x) { \
6449                         vfree(x); \
6450                         x = NULL; \
6451                 } \
6452         } while (0)
6453
6454         int i;
6455
6456         /* fastpath */
6457         /* Common */
6458         for_each_queue(bp, i) {
6459
6460                 /* status blocks */
6461                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6462                                bnx2x_fp(bp, i, status_blk_mapping),
6463                                sizeof(struct host_status_block));
6464         }
6465         /* Rx */
6466         for_each_rx_queue(bp, i) {
6467
6468                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6469                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6470                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6471                                bnx2x_fp(bp, i, rx_desc_mapping),
6472                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6473
6474                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6475                                bnx2x_fp(bp, i, rx_comp_mapping),
6476                                sizeof(struct eth_fast_path_rx_cqe) *
6477                                NUM_RCQ_BD);
6478
6479                 /* SGE ring */
6480                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6481                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6482                                bnx2x_fp(bp, i, rx_sge_mapping),
6483                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6484         }
6485         /* Tx */
6486         for_each_tx_queue(bp, i) {
6487
6488                 /* fastpath tx rings: tx_buf tx_desc */
6489                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6490                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6491                                bnx2x_fp(bp, i, tx_desc_mapping),
6492                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6493         }
6494         /* end of fastpath */
6495
6496         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6497                        sizeof(struct host_def_status_block));
6498
6499         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6500                        sizeof(struct bnx2x_slowpath));
6501
6502 #ifdef BCM_ISCSI
6503         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6504         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6505         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6506         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6507 #endif
6508         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6509
6510 #undef BNX2X_PCI_FREE
6511 #undef BNX2X_FREE
6512 }
6513
6514 static int bnx2x_alloc_mem(struct bnx2x *bp)
6515 {
6516
6517 #define BNX2X_PCI_ALLOC(x, y, size) \
6518         do { \
6519                 x = pci_alloc_consistent(bp->pdev, size, y); \
6520                 if (x == NULL) \
6521                         goto alloc_mem_err; \
6522                 memset(x, 0, size); \
6523         } while (0)
6524
6525 #define BNX2X_ALLOC(x, size) \
6526         do { \
6527                 x = vmalloc(size); \
6528                 if (x == NULL) \
6529                         goto alloc_mem_err; \
6530                 memset(x, 0, size); \
6531         } while (0)
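/* both helpers memset the buffer explicitly rather than relying on the
   allocator to return zeroed memory */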
6532
6533         int i;
6534
6535         /* fastpath */
6536         /* Common */
6537         for_each_queue(bp, i) {
6538                 bnx2x_fp(bp, i, bp) = bp;
6539
6540                 /* status blocks */
6541                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6542                                 &bnx2x_fp(bp, i, status_blk_mapping),
6543                                 sizeof(struct host_status_block));
6544         }
6545         /* Rx */
6546         for_each_rx_queue(bp, i) {
6547
6548                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6549                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6550                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6551                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6552                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6553                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6554
6555                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6556                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6557                                 sizeof(struct eth_fast_path_rx_cqe) *
6558                                 NUM_RCQ_BD);
6559
6560                 /* SGE ring */
6561                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6562                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6563                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6564                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6565                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6566         }
6567         /* Tx */
6568         for_each_tx_queue(bp, i) {
6569
6570                 /* fastpath tx rings: tx_buf tx_desc */
6571                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6572                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6573                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6574                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6575                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6576         }
6577         /* end of fastpath */
6578
6579         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6580                         sizeof(struct host_def_status_block));
6581
6582         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6583                         sizeof(struct bnx2x_slowpath));
6584
6585 #ifdef BCM_ISCSI
6586         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6587
6588         /* Initialize T1 */
6589         for (i = 0; i < 64*1024; i += 64) {
6590                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6591                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6592         }
6593
6594         /* allocate searcher T2 table:
6595            we allocate one quarter of the T1 allocation for T2
6596            (which is not entered into the ILT) */
6597         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6598
6599         /* Initialize T2 */
6600         for (i = 0; i < 16*1024; i += 64)
6601                 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6602
6603         /* now fixup the last line in the block to point to the next block */
6604         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6605
6606         /* Timer block array (8 bytes * MAX_CONN), physically uncached; for now 1024 conns */
6607         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6608
6609         /* QM queues (128*MAX_CONN) */
6610         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6611 #endif
6612
6613         /* Slow path ring */
6614         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6615
6616         return 0;
6617
6618 alloc_mem_err:
6619         bnx2x_free_mem(bp);
6620         return -ENOMEM;
6621
6622 #undef BNX2X_PCI_ALLOC
6623 #undef BNX2X_ALLOC
6624 }
6625
6626 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6627 {
6628         int i;
6629
6630         for_each_tx_queue(bp, i) {
6631                 struct bnx2x_fastpath *fp = &bp->fp[i];
6632
6633                 u16 bd_cons = fp->tx_bd_cons;
6634                 u16 sw_prod = fp->tx_pkt_prod;
6635                 u16 sw_cons = fp->tx_pkt_cons;
6636
6637                 while (sw_cons != sw_prod) {
6638                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6639                         sw_cons++;
6640                 }
6641         }
6642 }
6643
6644 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6645 {
6646         int i, j;
6647
6648         for_each_rx_queue(bp, j) {
6649                 struct bnx2x_fastpath *fp = &bp->fp[j];
6650
6651                 for (i = 0; i < NUM_RX_BD; i++) {
6652                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6653                         struct sk_buff *skb = rx_buf->skb;
6654
6655                         if (skb == NULL)
6656                                 continue;
6657
6658                         pci_unmap_single(bp->pdev,
6659                                          pci_unmap_addr(rx_buf, mapping),
6660                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6661
6662                         rx_buf->skb = NULL;
6663                         dev_kfree_skb(skb);
6664                 }
6665                 if (!fp->disable_tpa)
6666                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6667                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6668                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6669         }
6670 }
6671
6672 static void bnx2x_free_skbs(struct bnx2x *bp)
6673 {
6674         bnx2x_free_tx_skbs(bp);
6675         bnx2x_free_rx_skbs(bp);
6676 }
6677
6678 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6679 {
6680         int i, offset = 1;
6681
6682         free_irq(bp->msix_table[0].vector, bp->dev);
6683         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6684            bp->msix_table[0].vector);
6685
6686         for_each_queue(bp, i) {
6687                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6688                    "state %x\n", i, bp->msix_table[i + offset].vector,
6689                    bnx2x_fp(bp, i, state));
6690
6691                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6692         }
6693 }
6694
6695 static void bnx2x_free_irq(struct bnx2x *bp)
6696 {
6697         if (bp->flags & USING_MSIX_FLAG) {
6698                 bnx2x_free_msix_irqs(bp);
6699                 pci_disable_msix(bp->pdev);
6700                 bp->flags &= ~USING_MSIX_FLAG;
6701
6702         } else if (bp->flags & USING_MSI_FLAG) {
6703                 free_irq(bp->pdev->irq, bp->dev);
6704                 pci_disable_msi(bp->pdev);
6705                 bp->flags &= ~USING_MSI_FLAG;
6706
6707         } else
6708                 free_irq(bp->pdev->irq, bp->dev);
6709 }
6710
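/* Fill the MSI-X table (slowpath in entry 0, one entry per queue after
 * that) and request the vectors from the PCI core; on failure the
 * caller falls back to MSI or legacy INTx.
 */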
6711 static int bnx2x_enable_msix(struct bnx2x *bp)
6712 {
6713         int i, rc, offset = 1;
6714         int igu_vec = 0;
6715
6716         bp->msix_table[0].entry = igu_vec;
6717         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6718
6719         for_each_queue(bp, i) {
6720                 igu_vec = BP_L_ID(bp) + offset + i;
6721                 bp->msix_table[i + offset].entry = igu_vec;
6722                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6723                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6724         }
6725
6726         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6727                              BNX2X_NUM_QUEUES(bp) + offset);
6728         if (rc) {
6729                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6730                 return rc;
6731         }
6732
6733         bp->flags |= USING_MSIX_FLAG;
6734
6735         return 0;
6736 }
6737
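/* Request handlers for the enabled MSI-X vectors: the slowpath
 * interrupt first, then one per fastpath, named "<dev>-rx-N" or
 * "<dev>-tx-N" according to the queue type.
 */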
6738 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6739 {
6740         int i, rc, offset = 1;
6741
6742         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6743                          bp->dev->name, bp->dev);
6744         if (rc) {
6745                 BNX2X_ERR("request sp irq failed\n");
6746                 return -EBUSY;
6747         }
6748
6749         for_each_queue(bp, i) {
6750                 struct bnx2x_fastpath *fp = &bp->fp[i];
6751
6752                 if (i < bp->num_rx_queues)
6753                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6754                 else
6755                         sprintf(fp->name, "%s-tx-%d",
6756                                 bp->dev->name, i - bp->num_rx_queues);
6757
6758                 rc = request_irq(bp->msix_table[i + offset].vector,
6759                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6760                 if (rc) {
6761                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6762                         bnx2x_free_msix_irqs(bp);
6763                         return -EBUSY;
6764                 }
6765
6766                 fp->state = BNX2X_FP_STATE_IRQ;
6767         }
6768
6769         i = BNX2X_NUM_QUEUES(bp);
6770         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
6771                " ... fp[%d] %d\n",
6772                bp->dev->name, bp->msix_table[0].vector,
6773                0, bp->msix_table[offset].vector,
6774                i - 1, bp->msix_table[offset + i - 1].vector);
6775
6776         return 0;
6777 }
6778
6779 static int bnx2x_enable_msi(struct bnx2x *bp)
6780 {
6781         int rc;
6782
6783         rc = pci_enable_msi(bp->pdev);
6784         if (rc) {
6785                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6786                 return -1;
6787         }
6788         bp->flags |= USING_MSI_FLAG;
6789
6790         return 0;
6791 }
6792
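/* Request the single MSI or legacy INTx interrupt line; only the
 * legacy line may be shared, so IRQF_SHARED is used for INTx alone.
 */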
6793 static int bnx2x_req_irq(struct bnx2x *bp)
6794 {
6795         unsigned long flags;
6796         int rc;
6797
6798         if (bp->flags & USING_MSI_FLAG)
6799                 flags = 0;
6800         else
6801                 flags = IRQF_SHARED;
6802
6803         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6804                          bp->dev->name, bp->dev);
6805         if (!rc)
6806                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6807
6808         return rc;
6809 }
6810
6811 static void bnx2x_napi_enable(struct bnx2x *bp)
6812 {
6813         int i;
6814
6815         for_each_rx_queue(bp, i)
6816                 napi_enable(&bnx2x_fp(bp, i, napi));
6817 }
6818
6819 static void bnx2x_napi_disable(struct bnx2x *bp)
6820 {
6821         int i;
6822
6823         for_each_rx_queue(bp, i)
6824                 napi_disable(&bnx2x_fp(bp, i, napi));
6825 }
6826
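/* Re-enable NAPI, interrupts and the Tx queues once the interrupt
 * semaphore drops to zero, i.e. when the last disabler is done.
 */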
6827 static void bnx2x_netif_start(struct bnx2x *bp)
6828 {
6829         int intr_sem;
6830
6831         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6832         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6833
6834         if (intr_sem) {
6835                 if (netif_running(bp->dev)) {
6836                         bnx2x_napi_enable(bp);
6837                         bnx2x_int_enable(bp);
6838                         if (bp->state == BNX2X_STATE_OPEN)
6839                                 netif_tx_wake_all_queues(bp->dev);
6840                 }
6841         }
6842 }
6843
6844 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6845 {
6846         bnx2x_int_disable_sync(bp, disable_hw);
6847         bnx2x_napi_disable(bp);
6848         netif_tx_disable(bp->dev);
6849         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6850 }
6851
6852 /*
6853  * Init service functions
6854  */
6855
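/* Program (set) or invalidate (clear) the E1 CAM entries for the
 * primary and broadcast MAC addresses by posting a SET_MAC ramrod.
 */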
6856 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6857 {
6858         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6859         int port = BP_PORT(bp);
6860
6861         /* CAM allocation
6862          * unicasts 0-31:port0 32-63:port1
6863          * multicast 64-127:port0 128-191:port1
6864          */
6865         config->hdr.length = 2;
6866         config->hdr.offset = port ? 32 : 0;
6867         config->hdr.client_id = bp->fp->cl_id;
6868         config->hdr.reserved1 = 0;
6869
6870         /* primary MAC */
6871         config->config_table[0].cam_entry.msb_mac_addr =
6872                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6873         config->config_table[0].cam_entry.middle_mac_addr =
6874                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6875         config->config_table[0].cam_entry.lsb_mac_addr =
6876                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6877         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6878         if (set)
6879                 config->config_table[0].target_table_entry.flags = 0;
6880         else
6881                 CAM_INVALIDATE(config->config_table[0]);
6882         config->config_table[0].target_table_entry.clients_bit_vector =
6883                                                 cpu_to_le32(1 << BP_L_ID(bp));
6884         config->config_table[0].target_table_entry.vlan_id = 0;
6885
6886         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6887            (set ? "setting" : "clearing"),
6888            config->config_table[0].cam_entry.msb_mac_addr,
6889            config->config_table[0].cam_entry.middle_mac_addr,
6890            config->config_table[0].cam_entry.lsb_mac_addr);
6891
6892         /* broadcast */
6893         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6894         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6895         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6896         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6897         if (set)
6898                 config->config_table[1].target_table_entry.flags =
6899                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6900         else
6901                 CAM_INVALIDATE(config->config_table[1]);
6902         config->config_table[1].target_table_entry.clients_bit_vector =
6903                                                 cpu_to_le32(1 << BP_L_ID(bp));
6904         config->config_table[1].target_table_entry.vlan_id = 0;
6905
6906         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6907                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6908                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6909 }
6910
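/* E1H CAM variant: a single per-function entry that also carries the
 * outer-VLAN (E1HOV) tag.
 */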
6911 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6912 {
6913         struct mac_configuration_cmd_e1h *config =
6914                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6915
6916         /* CAM allocation for E1H
6917          * unicasts: by func number
6918          * multicast: 20+FUNC*20, 20 each
6919          */
6920         config->hdr.length = 1;
6921         config->hdr.offset = BP_FUNC(bp);
6922         config->hdr.client_id = bp->fp->cl_id;
6923         config->hdr.reserved1 = 0;
6924
6925         /* primary MAC */
6926         config->config_table[0].msb_mac_addr =
6927                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6928         config->config_table[0].middle_mac_addr =
6929                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6930         config->config_table[0].lsb_mac_addr =
6931                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6932         config->config_table[0].clients_bit_vector =
6933                                         cpu_to_le32(1 << BP_L_ID(bp));
6934         config->config_table[0].vlan_id = 0;
6935         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6936         if (set)
6937                 config->config_table[0].flags = BP_PORT(bp);
6938         else
6939                 config->config_table[0].flags =
6940                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6941
6942         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6943            (set ? "setting" : "clearing"),
6944            config->config_table[0].msb_mac_addr,
6945            config->config_table[0].middle_mac_addr,
6946            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6947
6948         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6949                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6950                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6951 }
6952
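/* Poll or sleep-wait until *state_p reaches the requested state, i.e.
 * until bnx2x_sp_event() records the ramrod completion; returns -EBUSY
 * after roughly 5 seconds without a match.
 */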
6953 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6954                              int *state_p, int poll)
6955 {
6956         /* can take a while if any port is running */
6957         int cnt = 5000;
6958
6959         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6960            poll ? "polling" : "waiting", state, idx);
6961
6962         might_sleep();
6963         while (cnt--) {
6964                 if (poll) {
6965                         bnx2x_rx_int(bp->fp, 10);
6966                         /* if the index is different from 0,
6967                          * the reply for some commands will
6968                          * be on a non-default queue
6969                          */
6970                         if (idx)
6971                                 bnx2x_rx_int(&bp->fp[idx], 10);
6972                 }
6973
6974                 mb(); /* state is changed by bnx2x_sp_event() */
6975                 if (*state_p == state) {
6976 #ifdef BNX2X_STOP_ON_ERROR
6977                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6978 #endif
6979                         return 0;
6980                 }
6981
6982                 msleep(1);
6983         }
6984
6985         /* timeout! */
6986         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6987                   poll ? "polling" : "waiting", state, idx);
6988 #ifdef BNX2X_STOP_ON_ERROR
6989         bnx2x_panic();
6990 #endif
6991
6992         return -EBUSY;
6993 }
6994
6995 static int bnx2x_setup_leading(struct bnx2x *bp)
6996 {
6997         int rc;
6998
6999         /* reset IGU state */
7000         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7001
7002         /* SETUP ramrod */
7003         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7004
7005         /* Wait for completion */
7006         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7007
7008         return rc;
7009 }
7010
7011 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7012 {
7013         struct bnx2x_fastpath *fp = &bp->fp[index];
7014
7015         /* reset IGU state */
7016         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7017
7018         /* SETUP ramrod */
7019         fp->state = BNX2X_FP_STATE_OPENING;
7020         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7021                       fp->cl_id, 0);
7022
7023         /* Wait for completion */
7024         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7025                                  &(fp->state), 0);
7026 }
7027
7028 static int bnx2x_poll(struct napi_struct *napi, int budget);
7029
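/* Derive the Rx/Tx queue counts for MSI-X operation from
 * bp->multi_mode and the num_rx_queues/num_tx_queues module
 * parameters, capped at BNX2X_MAX_QUEUES and clamped so that Tx never
 * exceeds Rx.
 */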
7030 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7031                                     int *num_tx_queues_out)
7032 {
7033         int _num_rx_queues = 0, _num_tx_queues = 0;
7034
7035         switch (bp->multi_mode) {
7036         case ETH_RSS_MODE_DISABLED:
7037                 _num_rx_queues = 1;
7038                 _num_tx_queues = 1;
7039                 break;
7040
7041         case ETH_RSS_MODE_REGULAR:
7042                 if (num_rx_queues)
7043                         _num_rx_queues = min_t(u32, num_rx_queues,
7044                                                BNX2X_MAX_QUEUES(bp));
7045                 else
7046                         _num_rx_queues = min_t(u32, num_online_cpus(),
7047                                                BNX2X_MAX_QUEUES(bp));
7048
7049                 if (num_tx_queues)
7050                         _num_tx_queues = min_t(u32, num_tx_queues,
7051                                                BNX2X_MAX_QUEUES(bp));
7052                 else
7053                         _num_tx_queues = min_t(u32, num_online_cpus(),
7054                                                BNX2X_MAX_QUEUES(bp));
7055
7056                 /* There must not be more Tx queues than Rx queues */
7057                 if (_num_tx_queues > _num_rx_queues) {
7058                         BNX2X_ERR("number of tx queues (%d) > "
7059                                   "number of rx queues (%d)"
7060                                   "  defaulting to %d\n",
7061                                   _num_tx_queues, _num_rx_queues,
7062                                   _num_rx_queues);
7063                         _num_tx_queues = _num_rx_queues;
7064                 }
7065                 break;
7066
7067
7068         default:
7069                 _num_rx_queues = 1;
7070                 _num_tx_queues = 1;
7071                 break;
7072         }
7073
7074         *num_rx_queues_out = _num_rx_queues;
7075         *num_tx_queues_out = _num_tx_queues;
7076 }
7077
7078 static int bnx2x_set_int_mode(struct bnx2x *bp)
7079 {
7080         int rc = 0;
7081
7082         switch (int_mode) {
7083         case INT_MODE_INTx:
7084         case INT_MODE_MSI:
7085                 bp->num_rx_queues = 1;
7086                 bp->num_tx_queues = 1;
7087                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7088                 break;
7089
7090         case INT_MODE_MSIX:
7091         default:
7092                 /* Set interrupt mode according to bp->multi_mode value */
7093                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7094                                         &bp->num_tx_queues);
7095
7096                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7097                    bp->num_rx_queues, bp->num_tx_queues);
7098
7099                 /* if we can't use MSI-X we only need one fastpath,
7100                  * so try to enable MSI-X with the requested number of fastpaths
7101                  * and fall back to MSI or legacy INTx with a single fastpath
7102                  */
7103                 rc = bnx2x_enable_msix(bp);
7104                 if (rc) {
7105                         /* failed to enable MSI-X */
7106                         if (bp->multi_mode)
7107                                 BNX2X_ERR("Multi requested but failed to "
7108                                           "enable MSI-X (rx %d tx %d), "
7109                                           "set number of queues to 1\n",
7110                                           bp->num_rx_queues, bp->num_tx_queues);
7111                         bp->num_rx_queues = 1;
7112                         bp->num_tx_queues = 1;
7113                 }
7114                 break;
7115         }
7116         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7117         return rc;
7118 }
7119
7120
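/* Bring the NIC up: choose the interrupt mode, allocate memory,
 * request IRQs, negotiate the LOAD type with the MCP (or emulate it
 * when there is no MCP), init the HW and open the client connections.
 */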
7121 /* must be called with rtnl_lock */
7122 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7123 {
7124         u32 load_code;
7125         int i, rc;
7126
7127 #ifdef BNX2X_STOP_ON_ERROR
7128         if (unlikely(bp->panic))
7129                 return -EPERM;
7130 #endif
7131
7132         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7133
7134         rc = bnx2x_set_int_mode(bp);
7135
7136         if (bnx2x_alloc_mem(bp))
7137                 return -ENOMEM;
7138
7139         for_each_rx_queue(bp, i)
7140                 bnx2x_fp(bp, i, disable_tpa) =
7141                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7142
7143         for_each_rx_queue(bp, i)
7144                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7145                                bnx2x_poll, 128);
7146
7147         bnx2x_napi_enable(bp);
7148
7149         if (bp->flags & USING_MSIX_FLAG) {
7150                 rc = bnx2x_req_msix_irqs(bp);
7151                 if (rc) {
7152                         pci_disable_msix(bp->pdev);
7153                         goto load_error1;
7154                 }
7155         } else {
7156                 /* Fall back to INTx if MSI-X could not be enabled due to
7157                    lack of memory (in bnx2x_set_int_mode()) */
7158                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7159                         bnx2x_enable_msi(bp);
7160                 bnx2x_ack_int(bp);
7161                 rc = bnx2x_req_irq(bp);
7162                 if (rc) {
7163                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7164                         if (bp->flags & USING_MSI_FLAG)
7165                                 pci_disable_msi(bp->pdev);
7166                         goto load_error1;
7167                 }
7168                 if (bp->flags & USING_MSI_FLAG) {
7169                         bp->dev->irq = bp->pdev->irq;
7170                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7171                                bp->dev->name, bp->pdev->irq);
7172                 }
7173         }
7174
7175         /* Send LOAD_REQUEST command to MCP.
7176            The MCP returns the type of LOAD command:
7177            if this is the first port to be initialized,
7178            common blocks should be initialized, otherwise not
7179         */
7180         if (!BP_NOMCP(bp)) {
7181                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7182                 if (!load_code) {
7183                         BNX2X_ERR("MCP response failure, aborting\n");
7184                         rc = -EBUSY;
7185                         goto load_error2;
7186                 }
7187                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7188                         rc = -EBUSY; /* other port in diagnostic mode */
7189                         goto load_error2;
7190                 }
7191
7192         } else {
7193                 int port = BP_PORT(bp);
7194
7195                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7196                    load_count[0], load_count[1], load_count[2]);
7197                 load_count[0]++;
7198                 load_count[1 + port]++;
7199                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7200                    load_count[0], load_count[1], load_count[2]);
7201                 if (load_count[0] == 1)
7202                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7203                 else if (load_count[1 + port] == 1)
7204                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7205                 else
7206                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7207         }
7208
7209         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7210             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7211                 bp->port.pmf = 1;
7212         else
7213                 bp->port.pmf = 0;
7214         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7215
7216         /* Initialize HW */
7217         rc = bnx2x_init_hw(bp, load_code);
7218         if (rc) {
7219                 BNX2X_ERR("HW init failed, aborting\n");
7220                 goto load_error2;
7221         }
7222
7223         /* Setup NIC internals and enable interrupts */
7224         bnx2x_nic_init(bp, load_code);
7225
7226         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7227             (bp->common.shmem2_base))
7228                 SHMEM2_WR(bp, dcc_support,
7229                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7230                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7231
7232         /* Send LOAD_DONE command to MCP */
7233         if (!BP_NOMCP(bp)) {
7234                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7235                 if (!load_code) {
7236                         BNX2X_ERR("MCP response failure, aborting\n");
7237                         rc = -EBUSY;
7238                         goto load_error3;
7239                 }
7240         }
7241
7242         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7243
7244         rc = bnx2x_setup_leading(bp);
7245         if (rc) {
7246                 BNX2X_ERR("Setup leading failed!\n");
7247                 goto load_error3;
7248         }
7249
7250         if (CHIP_IS_E1H(bp))
7251                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7252                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7253                         bp->state = BNX2X_STATE_DISABLED;
7254                 }
7255
7256         if (bp->state == BNX2X_STATE_OPEN) {
7257                 for_each_nondefault_queue(bp, i) {
7258                         rc = bnx2x_setup_multi(bp, i);
7259                         if (rc)
7260                                 goto load_error3;
7261                 }
7262
7263                 if (CHIP_IS_E1(bp))
7264                         bnx2x_set_mac_addr_e1(bp, 1);
7265                 else
7266                         bnx2x_set_mac_addr_e1h(bp, 1);
7267         }
7268
7269         if (bp->port.pmf)
7270                 bnx2x_initial_phy_init(bp, load_mode);
7271
7272         /* Start fast path */
7273         switch (load_mode) {
7274         case LOAD_NORMAL:
7275                 if (bp->state == BNX2X_STATE_OPEN) {
7276                         /* Tx queues should only be re-enabled */
7277                         netif_tx_wake_all_queues(bp->dev);
7278                 }
7279                 /* Initialize the receive filter. */
7280                 bnx2x_set_rx_mode(bp->dev);
7281                 break;
7282
7283         case LOAD_OPEN:
7284                 netif_tx_start_all_queues(bp->dev);
7285                 if (bp->state != BNX2X_STATE_OPEN)
7286                         netif_tx_disable(bp->dev);
7287                 /* Initialize the receive filter. */
7288                 bnx2x_set_rx_mode(bp->dev);
7289                 break;
7290
7291         case LOAD_DIAG:
7292                 /* Initialize the receive filter. */
7293                 bnx2x_set_rx_mode(bp->dev);
7294                 bp->state = BNX2X_STATE_DIAG;
7295                 break;
7296
7297         default:
7298                 break;
7299         }
7300
7301         if (!bp->port.pmf)
7302                 bnx2x__link_status_update(bp);
7303
7304         /* start the timer */
7305         mod_timer(&bp->timer, jiffies + bp->current_interval);
7306
7307
7308         return 0;
7309
7310 load_error3:
7311         bnx2x_int_disable_sync(bp, 1);
7312         if (!BP_NOMCP(bp)) {
7313                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7314                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7315         }
7316         bp->port.pmf = 0;
7317         /* Free SKBs, SGEs, TPA pool and driver internals */
7318         bnx2x_free_skbs(bp);
7319         for_each_rx_queue(bp, i)
7320                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7321 load_error2:
7322         /* Release IRQs */
7323         bnx2x_free_irq(bp);
7324 load_error1:
7325         bnx2x_napi_disable(bp);
7326         for_each_rx_queue(bp, i)
7327                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7328         bnx2x_free_mem(bp);
7329
7330         return rc;
7331 }
7332
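/* Tear down a non-leading client: send the HALT and then the CFC_DEL
 * ramrods, polling for the completion of each.
 */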
7333 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7334 {
7335         struct bnx2x_fastpath *fp = &bp->fp[index];
7336         int rc;
7337
7338         /* halt the connection */
7339         fp->state = BNX2X_FP_STATE_HALTING;
7340         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7341
7342         /* Wait for completion */
7343         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7344                                &(fp->state), 1);
7345         if (rc) /* timeout */
7346                 return rc;
7347
7348         /* delete cfc entry */
7349         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7350
7351         /* Wait for completion */
7352         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7353                                &(fp->state), 1);
7354         return rc;
7355 }
7356
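/* Tear down the leading connection: HALT client 0, then send
 * PORT_DELETE and busy-wait for its completion on the default status
 * block (the IRQs were already released by bnx2x_nic_unload()).
 */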
7357 static int bnx2x_stop_leading(struct bnx2x *bp)
7358 {
7359         __le16 dsb_sp_prod_idx;
7360         /* if the other port is handling traffic,
7361            this can take a lot of time */
7362         int cnt = 500;
7363         int rc;
7364
7365         might_sleep();
7366
7367         /* Send HALT ramrod */
7368         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7369         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7370
7371         /* Wait for completion */
7372         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7373                                &(bp->fp[0].state), 1);
7374         if (rc) /* timeout */
7375                 return rc;
7376
7377         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7378
7379         /* Send PORT_DELETE ramrod */
7380         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7381
7382         /* Wait for the completion to arrive on the default status block.
7383            We are going to reset the chip anyway,
7384            so there is not much to do if this times out
7385          */
7386         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7387                 if (!cnt) {
7388                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7389                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7390                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7391 #ifdef BNX2X_STOP_ON_ERROR
7392                         bnx2x_panic();
7393 #endif
7394                         rc = -EBUSY;
7395                         break;
7396                 }
7397                 cnt--;
7398                 msleep(1);
7399                 rmb(); /* Refresh the dsb_sp_prod */
7400         }
7401         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7402         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7403
7404         return rc;
7405 }
7406
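/* Per-function reset: zero the HC leading/trailing edge registers and
 * clear this function's ILT range.
 */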
7407 static void bnx2x_reset_func(struct bnx2x *bp)
7408 {
7409         int port = BP_PORT(bp);
7410         int func = BP_FUNC(bp);
7411         int base, i;
7412
7413         /* Configure IGU */
7414         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7415         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7416
7417         /* Clear ILT */
7418         base = FUNC_ILT_BASE(func);
7419         for (i = base; i < base + ILT_PER_FUNC; i++)
7420                 bnx2x_ilt_wr(bp, i, 0);
7421 }
7422
7423 static void bnx2x_reset_port(struct bnx2x *bp)
7424 {
7425         int port = BP_PORT(bp);
7426         u32 val;
7427
7428         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7429
7430         /* Do not rcv packets to BRB */
7431         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7432         /* Do not direct rcv packets that are not for MCP to the BRB */
7433         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7434                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7435
7436         /* Configure AEU */
7437         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7438
7439         msleep(100);
7440         /* Check for BRB port occupancy */
7441         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7442         if (val)
7443                 DP(NETIF_MSG_IFDOWN,
7444                    "BRB1 is not empty  %d blocks are occupied\n", val);
7445
7446         /* TODO: Close Doorbell port? */
7447 }
7448
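/* Dispatch the reset according to the MCP reset_code: COMMON resets
 * port, function and common blocks; PORT resets port and function;
 * FUNCTION resets the function alone.
 */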
7449 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7450 {
7451         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7452            BP_FUNC(bp), reset_code);
7453
7454         switch (reset_code) {
7455         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7456                 bnx2x_reset_port(bp);
7457                 bnx2x_reset_func(bp);
7458                 bnx2x_reset_common(bp);
7459                 break;
7460
7461         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7462                 bnx2x_reset_port(bp);
7463                 bnx2x_reset_func(bp);
7464                 break;
7465
7466         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7467                 bnx2x_reset_func(bp);
7468                 break;
7469
7470         default:
7471                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7472                 break;
7473         }
7474 }
7475
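/* Bring the NIC down: quiesce Rx/Tx, release IRQs, drain the Tx
 * fastpaths, invalidate the MAC/CAM configuration, close all
 * connections, negotiate the UNLOAD type with the MCP and reset the
 * chip accordingly.
 */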
7476 /* must be called with rtnl_lock */
7477 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7478 {
7479         int port = BP_PORT(bp);
7480         u32 reset_code = 0;
7481         int i, cnt, rc;
7482
7483         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7484
7485         bp->rx_mode = BNX2X_RX_MODE_NONE;
7486         bnx2x_set_storm_rx_mode(bp);
7487
7488         bnx2x_netif_stop(bp, 1);
7489
7490         del_timer_sync(&bp->timer);
7491         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7492                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7493         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7494
7495         /* Release IRQs */
7496         bnx2x_free_irq(bp);
7497
7498         /* Wait until tx fastpath tasks complete */
7499         for_each_tx_queue(bp, i) {
7500                 struct bnx2x_fastpath *fp = &bp->fp[i];
7501
7502                 cnt = 1000;
7503                 while (bnx2x_has_tx_work_unload(fp)) {
7504
7505                         bnx2x_tx_int(fp);
7506                         if (!cnt) {
7507                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7508                                           i);
7509 #ifdef BNX2X_STOP_ON_ERROR
7510                                 bnx2x_panic();
7511                                 return -EBUSY;
7512 #else
7513                                 break;
7514 #endif
7515                         }
7516                         cnt--;
7517                         msleep(1);
7518                 }
7519         }
7520         /* Give HW time to discard old tx messages */
7521         msleep(1);
7522
7523         if (CHIP_IS_E1(bp)) {
7524                 struct mac_configuration_cmd *config =
7525                                                 bnx2x_sp(bp, mcast_config);
7526
7527                 bnx2x_set_mac_addr_e1(bp, 0);
7528
7529                 for (i = 0; i < config->hdr.length; i++)
7530                         CAM_INVALIDATE(config->config_table[i]);
7531
7532                 config->hdr.length = i;
7533                 if (CHIP_REV_IS_SLOW(bp))
7534                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7535                 else
7536                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7537                 config->hdr.client_id = bp->fp->cl_id;
7538                 config->hdr.reserved1 = 0;
7539
7540                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7541                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7542                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7543
7544         } else { /* E1H */
7545                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7546
7547                 bnx2x_set_mac_addr_e1h(bp, 0);
7548
7549                 for (i = 0; i < MC_HASH_SIZE; i++)
7550                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7551
7552                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7553         }
7554
7555         if (unload_mode == UNLOAD_NORMAL)
7556                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7557
7558         else if (bp->flags & NO_WOL_FLAG)
7559                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7560
7561         else if (bp->wol) {
7562                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7563                 u8 *mac_addr = bp->dev->dev_addr;
7564                 u32 val;
7565                 /* The MAC address is written to entries 1-4 to
7566                    preserve entry 0, which is used by the PMF */
7567                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7568
7569                 val = (mac_addr[0] << 8) | mac_addr[1];
7570                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7571
7572                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7573                       (mac_addr[4] << 8) | mac_addr[5];
7574                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7575
7576                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7577
7578         } else
7579                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7580
7581         /* Close multi and leading connections.
7582            Completions for the ramrods are collected synchronously */
7583         for_each_nondefault_queue(bp, i)
7584                 if (bnx2x_stop_multi(bp, i))
7585                         goto unload_error;
7586
7587         rc = bnx2x_stop_leading(bp);
7588         if (rc) {
7589                 BNX2X_ERR("Stop leading failed!\n");
7590 #ifdef BNX2X_STOP_ON_ERROR
7591                 return -EBUSY;
7592 #else
7593                 goto unload_error;
7594 #endif
7595         }
7596
7597 unload_error:
7598         if (!BP_NOMCP(bp))
7599                 reset_code = bnx2x_fw_command(bp, reset_code);
7600         else {
7601                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7602                    load_count[0], load_count[1], load_count[2]);
7603                 load_count[0]--;
7604                 load_count[1 + port]--;
7605                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7606                    load_count[0], load_count[1], load_count[2]);
7607                 if (load_count[0] == 0)
7608                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7609                 else if (load_count[1 + port] == 0)
7610                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7611                 else
7612                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7613         }
7614
7615         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7616             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7617                 bnx2x__link_reset(bp);
7618
7619         /* Reset the chip */
7620         bnx2x_reset_chip(bp, reset_code);
7621
7622         /* Report UNLOAD_DONE to MCP */
7623         if (!BP_NOMCP(bp))
7624                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7625
7626         bp->port.pmf = 0;
7627
7628         /* Free SKBs, SGEs, TPA pool and driver internals */
7629         bnx2x_free_skbs(bp);
7630         for_each_rx_queue(bp, i)
7631                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7632         for_each_rx_queue(bp, i)
7633                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7634         bnx2x_free_mem(bp);
7635
7636         bp->state = BNX2X_STATE_CLOSED;
7637
7638         netif_carrier_off(bp->dev);
7639
7640         return 0;
7641 }
7642
7643 static void bnx2x_reset_task(struct work_struct *work)
7644 {
7645         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7646
7647 #ifdef BNX2X_STOP_ON_ERROR
7648         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
7649                   " so reset not done to allow debug dump.\n"
7650                   " You will need to reboot when done\n");
7651         return;
7652 #endif
7653
7654         rtnl_lock();
7655
7656         if (!netif_running(bp->dev))
7657                 goto reset_task_exit;
7658
7659         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7660         bnx2x_nic_load(bp, LOAD_NORMAL);
7661
7662 reset_task_exit:
7663         rtnl_unlock();
7664 }
7665
7666 /* end of nic load/unload */
7667
7668 /* ethtool_ops */
7669
7670 /*
7671  * Init service functions
7672  */
7673
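/* Map a function index to its PXP2 "pretend" register; writing a
 * function number there makes subsequent GRC accesses appear to come
 * from that function.
 */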
7674 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7675 {
7676         switch (func) {
7677         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7678         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7679         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7680         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7681         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7682         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7683         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7684         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7685         default:
7686                 BNX2X_ERR("Unsupported function index: %d\n", func);
7687                 return (u32)(-1);
7688         }
7689 }
7690
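/* Disable HC interrupts while pretending to be function 0 (the
 * "like-E1" mode noted below), then restore the original function in
 * the pretend register, BUG()ing if either write did not take effect.
 */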
7691 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7692 {
7693         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7694
7695         /* Flush all outstanding writes */
7696         mmiowb();
7697
7698         /* Pretend to be function 0 */
7699         REG_WR(bp, reg, 0);
7700         /* Flush the GRC transaction (in the chip) */
7701         new_val = REG_RD(bp, reg);
7702         if (new_val != 0) {
7703                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7704                           new_val);
7705                 BUG();
7706         }
7707
7708         /* From now on we are in "like-E1" mode */
7709         bnx2x_int_disable(bp);
7710
7711         /* Flush all outstanding writes */
7712         mmiowb();
7713
7714         /* Restore the original function settings */
7715         REG_WR(bp, reg, orig_func);
7716         new_val = REG_RD(bp, reg);
7717         if (new_val != orig_func) {
7718                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7719                           orig_func, new_val);
7720                 BUG();
7721         }
7722 }
7723
7724 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7725 {
7726         if (CHIP_IS_E1H(bp))
7727                 bnx2x_undi_int_disable_e1h(bp, func);
7728         else
7729                 bnx2x_int_disable(bp);
7730 }
7731
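/* If a pre-boot UNDI driver left the device initialized (detected via
 * the 0x7 CID offset it programs for the normal doorbell), unload it:
 * notify the MCP, quiesce input traffic, reset the chip while
 * preserving the NIG port-swap straps, and restore our func/fw_seq.
 */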
7732 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7733 {
7734         u32 val;
7735
7736         /* Check if there is any driver already loaded */
7737         val = REG_RD(bp, MISC_REG_UNPREPARED);
7738         if (val == 0x1) {
7739                 /* Check if it is the UNDI driver:
7740                  * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7741                  */
7742                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7743                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7744                 if (val == 0x7) {
7745                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7746                         /* save our func */
7747                         int func = BP_FUNC(bp);
7748                         u32 swap_en;
7749                         u32 swap_val;
7750
7751                         /* clear the UNDI indication */
7752                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7753
7754                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7755
7756                         /* try to unload UNDI on port 0 */
7757                         bp->func = 0;
7758                         bp->fw_seq =
7759                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7760                                 DRV_MSG_SEQ_NUMBER_MASK);
7761                         reset_code = bnx2x_fw_command(bp, reset_code);
7762
7763                         /* if UNDI is loaded on the other port */
7764                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7765
7766                                 /* send "DONE" for previous unload */
7767                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7768
7769                                 /* unload UNDI on port 1 */
7770                                 bp->func = 1;
7771                                 bp->fw_seq =
7772                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7773                                         DRV_MSG_SEQ_NUMBER_MASK);
7774                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7775
7776                                 bnx2x_fw_command(bp, reset_code);
7777                         }
7778
7779                         /* now it's safe to release the lock */
7780                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7781
7782                         bnx2x_undi_int_disable(bp, func);
7783
7784                         /* close input traffic and wait for it */
7785                         /* Do not rcv packets to BRB */
7786                         REG_WR(bp,
7787                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7788                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7789                         /* Do not direct rcv packets that are not for MCP to
7790                          * the BRB */
7791                         REG_WR(bp,
7792                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7793                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7794                         /* clear AEU */
7795                         REG_WR(bp,
7796                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7797                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7798                         msleep(10);
7799
7800                         /* save NIG port swap info */
7801                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7802                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7803                         /* reset device */
7804                         REG_WR(bp,
7805                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7806                                0xd3ffffff);
7807                         REG_WR(bp,
7808                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7809                                0x1403);
7810                         /* take the NIG out of reset and restore swap values */
7811                         REG_WR(bp,
7812                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7813                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7814                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7815                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7816
7817                         /* send unload done to the MCP */
7818                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7819
7820                         /* restore our func and fw_seq */
7821                         bp->func = func;
7822                         bp->fw_seq =
7823                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7824                                 DRV_MSG_SEQ_NUMBER_MASK);
7825
7826                 } else
7827                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7828         }
7829 }
7830
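/* Read the chip-wide configuration: assemble the chip id from the GRC
 * registers, then pick up flash size, shmem bases, BC version (warn if
 * older than BNX2X_BC_VER) and WoL capability from shared memory.
 */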
7831 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7832 {
7833         u32 val, val2, val3, val4, id;
7834         u16 pmc;
7835
7836         /* Get the chip revision id and number. */
7837         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7838         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7839         id = ((val & 0xffff) << 16);
7840         val = REG_RD(bp, MISC_REG_CHIP_REV);
7841         id |= ((val & 0xf) << 12);
7842         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7843         id |= ((val & 0xff) << 4);
7844         val = REG_RD(bp, MISC_REG_BOND_ID);
7845         id |= (val & 0xf);
7846         bp->common.chip_id = id;
7847         bp->link_params.chip_id = bp->common.chip_id;
7848         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7849
7850         val = (REG_RD(bp, 0x2874) & 0x55);
7851         if ((bp->common.chip_id & 0x1) ||
7852             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7853                 bp->flags |= ONE_PORT_FLAG;
7854                 BNX2X_DEV_INFO("single port device\n");
7855         }
7856
7857         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7858         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7859                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7860         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7861                        bp->common.flash_size, bp->common.flash_size);
7862
7863         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7864         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
7865         bp->link_params.shmem_base = bp->common.shmem_base;
7866         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
7867                        bp->common.shmem_base, bp->common.shmem2_base);
7868
7869         if (!bp->common.shmem_base ||
7870             (bp->common.shmem_base < 0xA0000) ||
7871             (bp->common.shmem_base >= 0xC0000)) {
7872                 BNX2X_DEV_INFO("MCP not active\n");
7873                 bp->flags |= NO_MCP_FLAG;
7874                 return;
7875         }
7876
7877         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7878         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7879                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7880                 BNX2X_ERR("BAD MCP validity signature\n");
7881
7882         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7883         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7884
7885         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7886                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7887                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7888
7889         bp->link_params.feature_config_flags = 0;
7890         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7891         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7892                 bp->link_params.feature_config_flags |=
7893                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7894         else
7895                 bp->link_params.feature_config_flags &=
7896                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7897
7898         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7899         bp->common.bc_ver = val;
7900         BNX2X_DEV_INFO("bc_ver %X\n", val);
7901         if (val < BNX2X_BC_VER) {
7902                 /* for now only warn;
7903                  * later we might need to enforce this */
7904                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7905                           " please upgrade BC\n", BNX2X_BC_VER, val);
7906         }
7907         bp->link_params.feature_config_flags |=
7908                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7909                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7910
7911         if (BP_E1HVN(bp) == 0) {
7912                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7913                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7914         } else {
7915                 /* no WOL capability for E1HVN != 0 */
7916                 bp->flags |= NO_WOL_FLAG;
7917         }
7918         BNX2X_DEV_INFO("%sWoL capable\n",
7919                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7920
7921         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7922         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7923         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7924         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7925
7926         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7927                val, val2, val3, val4);
7928 }
7929
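/* Translate the NVRAM switch/external-PHY configuration into the
 * ethtool SUPPORTED_* mask for this port and read the PHY address.
 */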
7930 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7931                                                     u32 switch_cfg)
7932 {
7933         int port = BP_PORT(bp);
7934         u32 ext_phy_type;
7935
7936         switch (switch_cfg) {
7937         case SWITCH_CFG_1G:
7938                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7939
7940                 ext_phy_type =
7941                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7942                 switch (ext_phy_type) {
7943                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7944                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7945                                        ext_phy_type);
7946
7947                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7948                                                SUPPORTED_10baseT_Full |
7949                                                SUPPORTED_100baseT_Half |
7950                                                SUPPORTED_100baseT_Full |
7951                                                SUPPORTED_1000baseT_Full |
7952                                                SUPPORTED_2500baseX_Full |
7953                                                SUPPORTED_TP |
7954                                                SUPPORTED_FIBRE |
7955                                                SUPPORTED_Autoneg |
7956                                                SUPPORTED_Pause |
7957                                                SUPPORTED_Asym_Pause);
7958                         break;
7959
7960                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7961                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7962                                        ext_phy_type);
7963
7964                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7965                                                SUPPORTED_10baseT_Full |
7966                                                SUPPORTED_100baseT_Half |
7967                                                SUPPORTED_100baseT_Full |
7968                                                SUPPORTED_1000baseT_Full |
7969                                                SUPPORTED_TP |
7970                                                SUPPORTED_FIBRE |
7971                                                SUPPORTED_Autoneg |
7972                                                SUPPORTED_Pause |
7973                                                SUPPORTED_Asym_Pause);
7974                         break;
7975
7976                 default:
7977                         BNX2X_ERR("NVRAM config error. "
7978                                   "BAD SerDes ext_phy_config 0x%x\n",
7979                                   bp->link_params.ext_phy_config);
7980                         return;
7981                 }
7982
7983                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7984                                            port*0x10);
7985                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7986                 break;
7987
7988         case SWITCH_CFG_10G:
7989                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7990
7991                 ext_phy_type =
7992                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7993                 switch (ext_phy_type) {
7994                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7995                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7996                                        ext_phy_type);
7997
7998                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7999                                                SUPPORTED_10baseT_Full |
8000                                                SUPPORTED_100baseT_Half |
8001                                                SUPPORTED_100baseT_Full |
8002                                                SUPPORTED_1000baseT_Full |
8003                                                SUPPORTED_2500baseX_Full |
8004                                                SUPPORTED_10000baseT_Full |
8005                                                SUPPORTED_TP |
8006                                                SUPPORTED_FIBRE |
8007                                                SUPPORTED_Autoneg |
8008                                                SUPPORTED_Pause |
8009                                                SUPPORTED_Asym_Pause);
8010                         break;
8011
8012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8013                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8014                                        ext_phy_type);
8015
8016                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8017                                                SUPPORTED_1000baseT_Full |
8018                                                SUPPORTED_FIBRE |
8019                                                SUPPORTED_Autoneg |
8020                                                SUPPORTED_Pause |
8021                                                SUPPORTED_Asym_Pause);
8022                         break;
8023
8024                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8025                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8026                                        ext_phy_type);
8027
8028                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8029                                                SUPPORTED_2500baseX_Full |
8030                                                SUPPORTED_1000baseT_Full |
8031                                                SUPPORTED_FIBRE |
8032                                                SUPPORTED_Autoneg |
8033                                                SUPPORTED_Pause |
8034                                                SUPPORTED_Asym_Pause);
8035                         break;
8036
8037                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8038                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8039                                        ext_phy_type);
8040
8041                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8042                                                SUPPORTED_FIBRE |
8043                                                SUPPORTED_Pause |
8044                                                SUPPORTED_Asym_Pause);
8045                         break;
8046
8047                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8048                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8049                                        ext_phy_type);
8050
8051                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8052                                                SUPPORTED_1000baseT_Full |
8053                                                SUPPORTED_FIBRE |
8054                                                SUPPORTED_Pause |
8055                                                SUPPORTED_Asym_Pause);
8056                         break;
8057
8058                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8059                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8060                                        ext_phy_type);
8061
8062                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8063                                                SUPPORTED_1000baseT_Full |
8064                                                SUPPORTED_Autoneg |
8065                                                SUPPORTED_FIBRE |
8066                                                SUPPORTED_Pause |
8067                                                SUPPORTED_Asym_Pause);
8068                         break;
8069
8070                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8071                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8072                                        ext_phy_type);
8073
8074                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8075                                                SUPPORTED_1000baseT_Full |
8076                                                SUPPORTED_Autoneg |
8077                                                SUPPORTED_FIBRE |
8078                                                SUPPORTED_Pause |
8079                                                SUPPORTED_Asym_Pause);
8080                         break;
8081
8082                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8083                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8084                                        ext_phy_type);
8085
8086                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8087                                                SUPPORTED_TP |
8088                                                SUPPORTED_Autoneg |
8089                                                SUPPORTED_Pause |
8090                                                SUPPORTED_Asym_Pause);
8091                         break;
8092
8093                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8094                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8095                                        ext_phy_type);
8096
8097                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8098                                                SUPPORTED_10baseT_Full |
8099                                                SUPPORTED_100baseT_Half |
8100                                                SUPPORTED_100baseT_Full |
8101                                                SUPPORTED_1000baseT_Full |
8102                                                SUPPORTED_10000baseT_Full |
8103                                                SUPPORTED_TP |
8104                                                SUPPORTED_Autoneg |
8105                                                SUPPORTED_Pause |
8106                                                SUPPORTED_Asym_Pause);
8107                         break;
8108
8109                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8110                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8111                                   bp->link_params.ext_phy_config);
8112                         break;
8113
8114                 default:
8115                         BNX2X_ERR("NVRAM config error. "
8116                                   "BAD XGXS ext_phy_config 0x%x\n",
8117                                   bp->link_params.ext_phy_config);
8118                         return;
8119                 }
8120
8121                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8122                                            port*0x18);
8123                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8124
8125                 break;
8126
8127         default:
8128                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8129                           bp->port.link_config);
8130                 return;
8131         }
8132         bp->link_params.phy_addr = bp->port.phy_addr;
8133
8134         /* mask what we support according to speed_cap_mask */
8135         if (!(bp->link_params.speed_cap_mask &
8136                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8137                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8138
8139         if (!(bp->link_params.speed_cap_mask &
8140                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8141                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8142
8143         if (!(bp->link_params.speed_cap_mask &
8144                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8145                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8146
8147         if (!(bp->link_params.speed_cap_mask &
8148                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8149                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8150
8151         if (!(bp->link_params.speed_cap_mask &
8152                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8153                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8154                                         SUPPORTED_1000baseT_Full);
8155
8156         if (!(bp->link_params.speed_cap_mask &
8157                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8158                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8159
8160         if (!(bp->link_params.speed_cap_mask &
8161                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8162                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8163
8164         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8165 }
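/*
 * Worked example of the speed_cap_mask pruning above (illustration only):
 * an 8073 reports 10G/2.5G/1G, but if NVRAM limits the port with
 *
 *	speed_cap_mask = PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
 *
 * the 2.5G and 10G bits are cleared and bp->port.supported keeps only
 * SUPPORTED_1000baseT_Full plus the FIBRE/Autoneg/Pause flags.
 */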
8166
8167 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8168 {
8169         bp->link_params.req_duplex = DUPLEX_FULL;
8170
8171         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8172         case PORT_FEATURE_LINK_SPEED_AUTO:
8173                 if (bp->port.supported & SUPPORTED_Autoneg) {
8174                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8175                         bp->port.advertising = bp->port.supported;
8176                 } else {
8177                         u32 ext_phy_type =
8178                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8179
8180                         if ((ext_phy_type ==
8181                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8182                             (ext_phy_type ==
8183                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8184                                 /* force 10G, no AN */
8185                                 bp->link_params.req_line_speed = SPEED_10000;
8186                                 bp->port.advertising =
8187                                                 (ADVERTISED_10000baseT_Full |
8188                                                  ADVERTISED_FIBRE);
8189                                 break;
8190                         }
8191                         BNX2X_ERR("NVRAM config error. "
8192                                   "Invalid link_config 0x%x"
8193                                   "  Autoneg not supported\n",
8194                                   bp->port.link_config);
8195                         return;
8196                 }
8197                 break;
8198
8199         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8200                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8201                         bp->link_params.req_line_speed = SPEED_10;
8202                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8203                                                 ADVERTISED_TP);
8204                 } else {
8205                         BNX2X_ERR("NVRAM config error. "
8206                                   "Invalid link_config 0x%x"
8207                                   "  speed_cap_mask 0x%x\n",
8208                                   bp->port.link_config,
8209                                   bp->link_params.speed_cap_mask);
8210                         return;
8211                 }
8212                 break;
8213
8214         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8215                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8216                         bp->link_params.req_line_speed = SPEED_10;
8217                         bp->link_params.req_duplex = DUPLEX_HALF;
8218                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8219                                                 ADVERTISED_TP);
8220                 } else {
8221                         BNX2X_ERR("NVRAM config error. "
8222                                   "Invalid link_config 0x%x"
8223                                   "  speed_cap_mask 0x%x\n",
8224                                   bp->port.link_config,
8225                                   bp->link_params.speed_cap_mask);
8226                         return;
8227                 }
8228                 break;
8229
8230         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8231                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8232                         bp->link_params.req_line_speed = SPEED_100;
8233                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8234                                                 ADVERTISED_TP);
8235                 } else {
8236                         BNX2X_ERR("NVRAM config error. "
8237                                   "Invalid link_config 0x%x"
8238                                   "  speed_cap_mask 0x%x\n",
8239                                   bp->port.link_config,
8240                                   bp->link_params.speed_cap_mask);
8241                         return;
8242                 }
8243                 break;
8244
8245         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8246                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8247                         bp->link_params.req_line_speed = SPEED_100;
8248                         bp->link_params.req_duplex = DUPLEX_HALF;
8249                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8250                                                 ADVERTISED_TP);
8251                 } else {
8252                         BNX2X_ERR("NVRAM config error. "
8253                                   "Invalid link_config 0x%x"
8254                                   "  speed_cap_mask 0x%x\n",
8255                                   bp->port.link_config,
8256                                   bp->link_params.speed_cap_mask);
8257                         return;
8258                 }
8259                 break;
8260
8261         case PORT_FEATURE_LINK_SPEED_1G:
8262                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8263                         bp->link_params.req_line_speed = SPEED_1000;
8264                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8265                                                 ADVERTISED_TP);
8266                 } else {
8267                         BNX2X_ERR("NVRAM config error. "
8268                                   "Invalid link_config 0x%x"
8269                                   "  speed_cap_mask 0x%x\n",
8270                                   bp->port.link_config,
8271                                   bp->link_params.speed_cap_mask);
8272                         return;
8273                 }
8274                 break;
8275
8276         case PORT_FEATURE_LINK_SPEED_2_5G:
8277                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8278                         bp->link_params.req_line_speed = SPEED_2500;
8279                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8280                                                 ADVERTISED_TP);
8281                 } else {
8282                         BNX2X_ERR("NVRAM config error. "
8283                                   "Invalid link_config 0x%x"
8284                                   "  speed_cap_mask 0x%x\n",
8285                                   bp->port.link_config,
8286                                   bp->link_params.speed_cap_mask);
8287                         return;
8288                 }
8289                 break;
8290
8291         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8292         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8293         case PORT_FEATURE_LINK_SPEED_10G_KR:
8294                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8295                         bp->link_params.req_line_speed = SPEED_10000;
8296                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8297                                                 ADVERTISED_FIBRE);
8298                 } else {
8299                         BNX2X_ERR("NVRAM config error. "
8300                                   "Invalid link_config 0x%x"
8301                                   "  speed_cap_mask 0x%x\n",
8302                                   bp->port.link_config,
8303                                   bp->link_params.speed_cap_mask);
8304                         return;
8305                 }
8306                 break;
8307
8308         default:
8309                 BNX2X_ERR("NVRAM config error. "
8310                           "BAD link speed link_config 0x%x\n",
8311                           bp->port.link_config);
8312                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8313                 bp->port.advertising = bp->port.supported;
8314                 break;
8315         }
8316
8317         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8318                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8319         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8320             !(bp->port.supported & SUPPORTED_Autoneg))
8321                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8322
8323         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8324                        "  advertising 0x%x\n",
8325                        bp->link_params.req_line_speed,
8326                        bp->link_params.req_duplex,
8327                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8328 }
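/*
 * Note on the flow-control fallback above: BNX2X_FLOW_CTRL_AUTO relies on
 * pause autonegotiation, so it is downgraded to BNX2X_FLOW_CTRL_NONE on
 * ports that cannot autonegotiate at all (e.g. the forced-10G 8705/8706
 * configurations handled earlier).
 */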
8329
8330 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8331 {
8332         int port = BP_PORT(bp);
8333         u32 val, val2;
8334         u32 config;
8335         u16 i;
8336
8337         bp->link_params.bp = bp;
8338         bp->link_params.port = port;
8339
8340         bp->link_params.lane_config =
8341                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8342         bp->link_params.ext_phy_config =
8343                 SHMEM_RD(bp,
8344                          dev_info.port_hw_config[port].external_phy_config);
8345         /* BCM8727_NOC => BCM8727 with no over-current support */
8346         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8347             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8348                 bp->link_params.ext_phy_config &=
8349                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8350                 bp->link_params.ext_phy_config |=
8351                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8352                 bp->link_params.feature_config_flags |=
8353                         FEATURE_CONFIG_BCM8727_NOC;
8354         }
8355
8356         bp->link_params.speed_cap_mask =
8357                 SHMEM_RD(bp,
8358                          dev_info.port_hw_config[port].speed_capability_mask);
8359
8360         bp->port.link_config =
8361                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8362
8363         /* Get the XGXS rx and tx config for all 4 lanes */
8364         for (i = 0; i < 2; i++) {
8365                 val = SHMEM_RD(bp,
8366                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8367                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8368                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8369
8370                 val = SHMEM_RD(bp,
8371                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8372                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8373                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8374         }
8375
8376         /* If the device is capable of WoL, set the default state according
8377          * to the HW
8378          */
8379         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8380         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8381                    (config & PORT_FEATURE_WOL_ENABLED));
8382
8383         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8384                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8385                        bp->link_params.lane_config,
8386                        bp->link_params.ext_phy_config,
8387                        bp->link_params.speed_cap_mask, bp->port.link_config);
8388
8389         bp->link_params.switch_cfg |= (bp->port.link_config &
8390                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8391         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8392
8393         bnx2x_link_settings_requested(bp);
8394
8395         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8396         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8397         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8398         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8399         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8400         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8401         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8402         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8403         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8404         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8405 }
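/*
 * Sketch of the MAC assembly above: the two shmem words hold the address
 * as (hypothetical values)
 *
 *	val2 (mac_upper) = 0x00000011;
 *	val  (mac_lower) = 0x22334455;
 *
 * which unpacks to dev_addr[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 * i.e. 00:11:22:33:44:55.
 */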
8406
8407 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8408 {
8409         int func = BP_FUNC(bp);
8410         u32 val, val2;
8411         int rc = 0;
8412
8413         bnx2x_get_common_hwinfo(bp);
8414
8415         bp->e1hov = 0;
8416         bp->e1hmf = 0;
8417         if (CHIP_IS_E1H(bp)) {
8418                 bp->mf_config =
8419                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8420
8421                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8422                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8423                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8424                         bp->e1hmf = 1;
8425                 BNX2X_DEV_INFO("%s function mode\n",
8426                                IS_E1HMF(bp) ? "multi" : "single");
8427
8428                 if (IS_E1HMF(bp)) {
8429                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8430                                                                 e1hov_tag) &
8431                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8432                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8433                                 bp->e1hov = val;
8434                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8435                                                "(0x%04x)\n",
8436                                                func, bp->e1hov, bp->e1hov);
8437                         } else {
8438                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8439                                           "  aborting\n", func);
8440                                 rc = -EPERM;
8441                         }
8442                 } else {
8443                         if (BP_E1HVN(bp)) {
8444                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8445                                           "  aborting\n", BP_E1HVN(bp));
8446                                 rc = -EPERM;
8447                         }
8448                 }
8449         }
8450
8451         if (!BP_NOMCP(bp)) {
8452                 bnx2x_get_port_hwinfo(bp);
8453
8454                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8455                               DRV_MSG_SEQ_NUMBER_MASK);
8456                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8457         }
8458
8459         if (IS_E1HMF(bp)) {
8460                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8461                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8462                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8463                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8464                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8465                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8466                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8467                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8468                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8469                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8470                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8471                                ETH_ALEN);
8472                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8473                                ETH_ALEN);
8474                 }
8475
8476                 return rc;
8477         }
8478
8479         if (BP_NOMCP(bp)) {
8480                 /* only supposed to happen on emulation/FPGA */
8481                 BNX2X_ERR("warning: random MAC workaround active\n");
8482                 random_ether_addr(bp->dev->dev_addr);
8483                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8484         }
8485
8486         return rc;
8487 }
8488
8489 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8490 {
8491         int func = BP_FUNC(bp);
8492         int timer_interval;
8493         int rc;
8494
8495         /* Disable interrupt handling until HW is initialized */
8496         atomic_set(&bp->intr_sem, 1);
8497         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8498
8499         mutex_init(&bp->port.phy_mutex);
8500
8501         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8502         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8503
8504         rc = bnx2x_get_hwinfo(bp);
8505
8506         /* need to reset chip if undi was active */
8507         if (!BP_NOMCP(bp))
8508                 bnx2x_undi_unload(bp);
8509
8510         if (CHIP_REV_IS_FPGA(bp))
8511                 printk(KERN_ERR PFX "FPGA detected\n");
8512
8513         if (BP_NOMCP(bp) && (func == 0))
8514                 printk(KERN_ERR PFX
8515                        "MCP disabled, must load devices in order!\n");
8516
8517         /* Set multi queue mode */
8518         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8519             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8520                 printk(KERN_ERR PFX
8521                       "Multi disabled since int_mode requested is not MSI-X\n");
8522                 multi_mode = ETH_RSS_MODE_DISABLED;
8523         }
8524         bp->multi_mode = multi_mode;
8525
8526
8527         /* Set TPA flags */
8528         if (disable_tpa) {
8529                 bp->flags &= ~TPA_ENABLE_FLAG;
8530                 bp->dev->features &= ~NETIF_F_LRO;
8531         } else {
8532                 bp->flags |= TPA_ENABLE_FLAG;
8533                 bp->dev->features |= NETIF_F_LRO;
8534         }
8535
8536         bp->mrrs = mrrs;
8537
8538         bp->tx_ring_size = MAX_TX_AVAIL;
8539         bp->rx_ring_size = MAX_RX_AVAIL;
8540
8541         bp->rx_csum = 1;
8542
8543         bp->tx_ticks = 50;
8544         bp->rx_ticks = 25;
8545
8546         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8547         bp->current_interval = (poll ? poll : timer_interval);
8548
8549         init_timer(&bp->timer);
8550         bp->timer.expires = jiffies + bp->current_interval;
8551         bp->timer.data = (unsigned long) bp;
8552         bp->timer.function = bnx2x_timer;
8553
8554         return rc;
8555 }
8556
8557 /*
8558  * ethtool service functions
8559  */
8560
8561 /* All ethtool functions called with rtnl_lock */
8562
8563 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8564 {
8565         struct bnx2x *bp = netdev_priv(dev);
8566
8567         cmd->supported = bp->port.supported;
8568         cmd->advertising = bp->port.advertising;
8569
8570         if (netif_carrier_ok(dev)) {
8571                 cmd->speed = bp->link_vars.line_speed;
8572                 cmd->duplex = bp->link_vars.duplex;
8573         } else {
8574                 cmd->speed = bp->link_params.req_line_speed;
8575                 cmd->duplex = bp->link_params.req_duplex;
8576         }
8577         if (IS_E1HMF(bp)) {
8578                 u16 vn_max_rate;
8579
8580                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8581                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8582                 if (vn_max_rate < cmd->speed)
8583                         cmd->speed = vn_max_rate;
8584         }
8585
8586         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8587                 u32 ext_phy_type =
8588                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8589
8590                 switch (ext_phy_type) {
8591                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8592                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8593                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8594                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8595                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8596                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8597                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8598                         cmd->port = PORT_FIBRE;
8599                         break;
8600
8601                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8602                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8603                         cmd->port = PORT_TP;
8604                         break;
8605
8606                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8607                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8608                                   bp->link_params.ext_phy_config);
8609                         break;
8610
8611                 default:
8612                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8613                            bp->link_params.ext_phy_config);
8614                         break;
8615                 }
8616         } else
8617                 cmd->port = PORT_TP;
8618
8619         cmd->phy_address = bp->port.phy_addr;
8620         cmd->transceiver = XCVR_INTERNAL;
8621
8622         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8623                 cmd->autoneg = AUTONEG_ENABLE;
8624         else
8625                 cmd->autoneg = AUTONEG_DISABLE;
8626
8627         cmd->maxtxpkt = 0;
8628         cmd->maxrxpkt = 0;
8629
8630         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8631            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8632            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8633            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8634            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8635            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8636            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8637
8638         return 0;
8639 }
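/*
 * Reached from userspace via the ETHTOOL_GSET ioctl, e.g.
 *
 *	ethtool eth0
 *
 * With the carrier down, the reported speed/duplex are the requested
 * values rather than a measured link. In E1H multi-function mode the
 * speed is clamped to the per-function bandwidth: a MAX_BW field of 50,
 * for example, gives vn_max_rate = 50 * 100 = 5000, so a 10G link is
 * shown as 5000 Mb/s.
 */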
8640
8641 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8642 {
8643         struct bnx2x *bp = netdev_priv(dev);
8644         u32 advertising;
8645
8646         if (IS_E1HMF(bp))
8647                 return 0;
8648
8649         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8650            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8651            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8652            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8653            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8654            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8655            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8656
8657         if (cmd->autoneg == AUTONEG_ENABLE) {
8658                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8659                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8660                         return -EINVAL;
8661                 }
8662
8663                 /* advertise the requested speed and duplex if supported */
8664                 cmd->advertising &= bp->port.supported;
8665
8666                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8667                 bp->link_params.req_duplex = DUPLEX_FULL;
8668                 bp->port.advertising |= (ADVERTISED_Autoneg |
8669                                          cmd->advertising);
8670
8671         } else { /* forced speed */
8672                 /* advertise the requested speed and duplex if supported */
8673                 switch (cmd->speed) {
8674                 case SPEED_10:
8675                         if (cmd->duplex == DUPLEX_FULL) {
8676                                 if (!(bp->port.supported &
8677                                       SUPPORTED_10baseT_Full)) {
8678                                         DP(NETIF_MSG_LINK,
8679                                            "10M full not supported\n");
8680                                         return -EINVAL;
8681                                 }
8682
8683                                 advertising = (ADVERTISED_10baseT_Full |
8684                                                ADVERTISED_TP);
8685                         } else {
8686                                 if (!(bp->port.supported &
8687                                       SUPPORTED_10baseT_Half)) {
8688                                         DP(NETIF_MSG_LINK,
8689                                            "10M half not supported\n");
8690                                         return -EINVAL;
8691                                 }
8692
8693                                 advertising = (ADVERTISED_10baseT_Half |
8694                                                ADVERTISED_TP);
8695                         }
8696                         break;
8697
8698                 case SPEED_100:
8699                         if (cmd->duplex == DUPLEX_FULL) {
8700                                 if (!(bp->port.supported &
8701                                                 SUPPORTED_100baseT_Full)) {
8702                                         DP(NETIF_MSG_LINK,
8703                                            "100M full not supported\n");
8704                                         return -EINVAL;
8705                                 }
8706
8707                                 advertising = (ADVERTISED_100baseT_Full |
8708                                                ADVERTISED_TP);
8709                         } else {
8710                                 if (!(bp->port.supported &
8711                                                 SUPPORTED_100baseT_Half)) {
8712                                         DP(NETIF_MSG_LINK,
8713                                            "100M half not supported\n");
8714                                         return -EINVAL;
8715                                 }
8716
8717                                 advertising = (ADVERTISED_100baseT_Half |
8718                                                ADVERTISED_TP);
8719                         }
8720                         break;
8721
8722                 case SPEED_1000:
8723                         if (cmd->duplex != DUPLEX_FULL) {
8724                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8725                                 return -EINVAL;
8726                         }
8727
8728                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8729                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8730                                 return -EINVAL;
8731                         }
8732
8733                         advertising = (ADVERTISED_1000baseT_Full |
8734                                        ADVERTISED_TP);
8735                         break;
8736
8737                 case SPEED_2500:
8738                         if (cmd->duplex != DUPLEX_FULL) {
8739                                 DP(NETIF_MSG_LINK,
8740                                    "2.5G half not supported\n");
8741                                 return -EINVAL;
8742                         }
8743
8744                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8745                                 DP(NETIF_MSG_LINK,
8746                                    "2.5G full not supported\n");
8747                                 return -EINVAL;
8748                         }
8749
8750                         advertising = (ADVERTISED_2500baseX_Full |
8751                                        ADVERTISED_TP);
8752                         break;
8753
8754                 case SPEED_10000:
8755                         if (cmd->duplex != DUPLEX_FULL) {
8756                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8757                                 return -EINVAL;
8758                         }
8759
8760                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8761                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8762                                 return -EINVAL;
8763                         }
8764
8765                         advertising = (ADVERTISED_10000baseT_Full |
8766                                        ADVERTISED_FIBRE);
8767                         break;
8768
8769                 default:
8770                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8771                         return -EINVAL;
8772                 }
8773
8774                 bp->link_params.req_line_speed = cmd->speed;
8775                 bp->link_params.req_duplex = cmd->duplex;
8776                 bp->port.advertising = advertising;
8777         }
8778
8779         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8780            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8781            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8782            bp->port.advertising);
8783
8784         if (netif_running(dev)) {
8785                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8786                 bnx2x_link_set(bp);
8787         }
8788
8789         return 0;
8790 }
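/*
 * Example ETHTOOL_SSET requests exercising the paths above (sketch):
 *
 *	ethtool -s eth0 autoneg on
 *	ethtool -s eth0 speed 10000 duplex full autoneg off
 *
 * Forced half duplex at 1G and above is rejected with -EINVAL, as is
 * any speed missing from bp->port.supported. In E1H multi-function
 * mode the request is silently ignored (returns 0).
 */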
8791
8792 #define PHY_FW_VER_LEN                  10
8793
8794 static void bnx2x_get_drvinfo(struct net_device *dev,
8795                               struct ethtool_drvinfo *info)
8796 {
8797         struct bnx2x *bp = netdev_priv(dev);
8798         u8 phy_fw_ver[PHY_FW_VER_LEN];
8799
8800         strcpy(info->driver, DRV_MODULE_NAME);
8801         strcpy(info->version, DRV_MODULE_VERSION);
8802
8803         phy_fw_ver[0] = '\0';
8804         if (bp->port.pmf) {
8805                 bnx2x_acquire_phy_lock(bp);
8806                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8807                                              (bp->state != BNX2X_STATE_CLOSED),
8808                                              phy_fw_ver, PHY_FW_VER_LEN);
8809                 bnx2x_release_phy_lock(bp);
8810         }
8811
8812         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8813                  (bp->common.bc_ver & 0xff0000) >> 16,
8814                  (bp->common.bc_ver & 0xff00) >> 8,
8815                  (bp->common.bc_ver & 0xff),
8816                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8817         strcpy(info->bus_info, pci_name(bp->pdev));
8818         info->n_stats = BNX2X_NUM_STATS;
8819         info->testinfo_len = BNX2X_NUM_TESTS;
8820         info->eedump_len = bp->common.flash_size;
8821         info->regdump_len = 0;
8822 }
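/*
 * Backs "ethtool -i eth0". The firmware string combines the bootcode
 * version from shmem with, on the PMF only, the external PHY firmware
 * version, e.g. "BC:5.0.11 PHY:..." (illustrative values).
 */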
8823
8824 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8825 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8826
8827 static int bnx2x_get_regs_len(struct net_device *dev)
8828 {
8829         static u32 regdump_len;
8830         struct bnx2x *bp = netdev_priv(dev);
8831         int i;
8832
8833         if (regdump_len)
8834                 return regdump_len;
8835
8836         if (CHIP_IS_E1(bp)) {
8837                 for (i = 0; i < REGS_COUNT; i++)
8838                         if (IS_E1_ONLINE(reg_addrs[i].info))
8839                                 regdump_len += reg_addrs[i].size;
8840
8841                 for (i = 0; i < WREGS_COUNT_E1; i++)
8842                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8843                                 regdump_len += wreg_addrs_e1[i].size *
8844                                         (1 + wreg_addrs_e1[i].read_regs_count);
8845
8846         } else { /* E1H */
8847                 for (i = 0; i < REGS_COUNT; i++)
8848                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8849                                 regdump_len += reg_addrs[i].size;
8850
8851                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8852                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8853                                 regdump_len += wreg_addrs_e1h[i].size *
8854                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8855         }
8856         regdump_len *= 4;
8857         regdump_len += sizeof(struct dump_hdr);
8858
8859         return regdump_len;
8860 }
8861
8862 static void bnx2x_get_regs(struct net_device *dev,
8863                            struct ethtool_regs *regs, void *_p)
8864 {
8865         u32 *p = _p, i, j;
8866         struct bnx2x *bp = netdev_priv(dev);
8867         struct dump_hdr dump_hdr = {0};
8868
8869         regs->version = 0;
8870         memset(p, 0, regs->len);
8871
8872         if (!netif_running(bp->dev))
8873                 return;
8874
8875         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8876         dump_hdr.dump_sign = dump_sign_all;
8877         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8878         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8879         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8880         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8881         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8882
8883         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8884         p += dump_hdr.hdr_size + 1;
8885
8886         if (CHIP_IS_E1(bp)) {
8887                 for (i = 0; i < REGS_COUNT; i++)
8888                         if (IS_E1_ONLINE(reg_addrs[i].info))
8889                                 for (j = 0; j < reg_addrs[i].size; j++)
8890                                         *p++ = REG_RD(bp,
8891                                                       reg_addrs[i].addr + j*4);
8892
8893         } else { /* E1H */
8894                 for (i = 0; i < REGS_COUNT; i++)
8895                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8896                                 for (j = 0; j < reg_addrs[i].size; j++)
8897                                         *p++ = REG_RD(bp,
8898                                                       reg_addrs[i].addr + j*4);
8899         }
8900 }
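/*
 * Backs ETHTOOL_GREGS, e.g.
 *
 *	ethtool -d eth0 raw on > bnx2x-regs.bin
 *
 * The dump is one struct dump_hdr followed by the raw 32-bit register
 * values. bnx2x_get_regs_len() caches its result in a static variable
 * since the per-chip register tables never change at runtime; note the
 * cache is global, so it assumes all bnx2x devices in the system belong
 * to the same chip family.
 */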
8901
8902 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8903 {
8904         struct bnx2x *bp = netdev_priv(dev);
8905
8906         if (bp->flags & NO_WOL_FLAG) {
8907                 wol->supported = 0;
8908                 wol->wolopts = 0;
8909         } else {
8910                 wol->supported = WAKE_MAGIC;
8911                 if (bp->wol)
8912                         wol->wolopts = WAKE_MAGIC;
8913                 else
8914                         wol->wolopts = 0;
8915         }
8916         memset(&wol->sopass, 0, sizeof(wol->sopass));
8917 }
8918
8919 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8920 {
8921         struct bnx2x *bp = netdev_priv(dev);
8922
8923         if (wol->wolopts & ~WAKE_MAGIC)
8924                 return -EINVAL;
8925
8926         if (wol->wolopts & WAKE_MAGIC) {
8927                 if (bp->flags & NO_WOL_FLAG)
8928                         return -EINVAL;
8929
8930                 bp->wol = 1;
8931         } else
8932                 bp->wol = 0;
8933
8934         return 0;
8935 }
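/*
 * Only magic-packet wake is supported, so the valid requests are
 * (sketch):
 *
 *	ethtool -s eth0 wol g	=> bp->wol = 1
 *	ethtool -s eth0 wol d	=> bp->wol = 0
 *
 * Any other wake option fails with -EINVAL.
 */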
8936
8937 static u32 bnx2x_get_msglevel(struct net_device *dev)
8938 {
8939         struct bnx2x *bp = netdev_priv(dev);
8940
8941         return bp->msglevel;
8942 }
8943
8944 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8945 {
8946         struct bnx2x *bp = netdev_priv(dev);
8947
8948         if (capable(CAP_NET_ADMIN))
8949                 bp->msglevel = level;
8950 }
8951
8952 static int bnx2x_nway_reset(struct net_device *dev)
8953 {
8954         struct bnx2x *bp = netdev_priv(dev);
8955
8956         if (!bp->port.pmf)
8957                 return 0;
8958
8959         if (netif_running(dev)) {
8960                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8961                 bnx2x_link_set(bp);
8962         }
8963
8964         return 0;
8965 }
8966
8967 static u32
8968 bnx2x_get_link(struct net_device *dev)
8969 {
8970         struct bnx2x *bp = netdev_priv(dev);
8971
8972         return bp->link_vars.link_up;
8973 }
8974
8975 static int bnx2x_get_eeprom_len(struct net_device *dev)
8976 {
8977         struct bnx2x *bp = netdev_priv(dev);
8978
8979         return bp->common.flash_size;
8980 }
8981
8982 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8983 {
8984         int port = BP_PORT(bp);
8985         int count, i;
8986         u32 val = 0;
8987
8988         /* adjust timeout for emulation/FPGA */
8989         count = NVRAM_TIMEOUT_COUNT;
8990         if (CHIP_REV_IS_SLOW(bp))
8991                 count *= 100;
8992
8993         /* request access to nvram interface */
8994         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8995                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8996
8997         for (i = 0; i < count*10; i++) {
8998                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8999                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9000                         break;
9001
9002                 udelay(5);
9003         }
9004
9005         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9006                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9007                 return -EBUSY;
9008         }
9009
9010         return 0;
9011 }
9012
9013 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9014 {
9015         int port = BP_PORT(bp);
9016         int count, i;
9017         u32 val = 0;
9018
9019         /* adjust timeout for emulation/FPGA */
9020         count = NVRAM_TIMEOUT_COUNT;
9021         if (CHIP_REV_IS_SLOW(bp))
9022                 count *= 100;
9023
9024         /* relinquish nvram interface */
9025         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9026                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9027
9028         for (i = 0; i < count*10; i++) {
9029                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9030                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9031                         break;
9032
9033                 udelay(5);
9034         }
9035
9036         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9037                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9038                 return -EBUSY;
9039         }
9040
9041         return 0;
9042 }
9043
9044 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9045 {
9046         u32 val;
9047
9048         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9049
9050         /* enable both bits, even on read */
9051         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9052                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9053                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9054 }
9055
9056 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9057 {
9058         u32 val;
9059
9060         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9061
9062         /* disable both bits, even after read */
9063         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9064                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9065                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9066 }
9067
9068 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9069                                   u32 cmd_flags)
9070 {
9071         int count, i, rc;
9072         u32 val;
9073
9074         /* build the command word */
9075         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9076
9077         /* need to clear DONE bit separately */
9078         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9079
9080         /* address of the NVRAM to read from */
9081         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9082                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9083
9084         /* issue a read command */
9085         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9086
9087         /* adjust timeout for emulation/FPGA */
9088         count = NVRAM_TIMEOUT_COUNT;
9089         if (CHIP_REV_IS_SLOW(bp))
9090                 count *= 100;
9091
9092         /* wait for completion */
9093         *ret_val = 0;
9094         rc = -EBUSY;
9095         for (i = 0; i < count; i++) {
9096                 udelay(5);
9097                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9098
9099                 if (val & MCPR_NVM_COMMAND_DONE) {
9100                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9101                         /* we read nvram data in cpu order,
9102                          * but ethtool sees it as an array of bytes;
9103                          * converting to big-endian does the work */
9104                         *ret_val = cpu_to_be32(val);
9105                         rc = 0;
9106                         break;
9107                 }
9108         }
9109
9110         return rc;
9111 }
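/*
 * Endianness sketch for the conversion above: REG_RD() returns the
 * dword in cpu order, and cpu_to_be32() lays it back down so that the
 * byte at the lowest flash offset lands first in the caller's buffer,
 * making the ethtool EEPROM image identical on little- and big-endian
 * hosts.
 */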
9112
9113 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9114                             int buf_size)
9115 {
9116         int rc;
9117         u32 cmd_flags;
9118         __be32 val;
9119
9120         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9121                 DP(BNX2X_MSG_NVM,
9122                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9123                    offset, buf_size);
9124                 return -EINVAL;
9125         }
9126
9127         if (offset + buf_size > bp->common.flash_size) {
9128                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9129                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9130                    offset, buf_size, bp->common.flash_size);
9131                 return -EINVAL;
9132         }
9133
9134         /* request access to nvram interface */
9135         rc = bnx2x_acquire_nvram_lock(bp);
9136         if (rc)
9137                 return rc;
9138
9139         /* enable access to nvram interface */
9140         bnx2x_enable_nvram_access(bp);
9141
9142         /* read the first word(s) */
9143         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9144         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9145                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9146                 memcpy(ret_buf, &val, 4);
9147
9148                 /* advance to the next dword */
9149                 offset += sizeof(u32);
9150                 ret_buf += sizeof(u32);
9151                 buf_size -= sizeof(u32);
9152                 cmd_flags = 0;
9153         }
9154
9155         if (rc == 0) {
9156                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9157                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9158                 memcpy(ret_buf, &val, 4);
9159         }
9160
9161         /* disable access to nvram interface */
9162         bnx2x_disable_nvram_access(bp);
9163         bnx2x_release_nvram_lock(bp);
9164
9165         return rc;
9166 }
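/*
 * Command-flag protocol used above (sketch): a multi-dword read goes
 * out as FIRST, 0, ..., 0, LAST; for a 12-byte read:
 *
 *	bnx2x_nvram_read_dword(bp, off,     &val, MCPR_NVM_COMMAND_FIRST);
 *	bnx2x_nvram_read_dword(bp, off + 4, &val, 0);
 *	bnx2x_nvram_read_dword(bp, off + 8, &val, MCPR_NVM_COMMAND_LAST);
 *
 * A single-dword read carries FIRST | LAST in one command.
 */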
9167
9168 static int bnx2x_get_eeprom(struct net_device *dev,
9169                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9170 {
9171         struct bnx2x *bp = netdev_priv(dev);
9172         int rc;
9173
9174         if (!netif_running(dev))
9175                 return -EAGAIN;
9176
9177         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9178            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9179            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9180            eeprom->len, eeprom->len);
9181
9182         /* parameters already validated in ethtool_get_eeprom */
9183
9184         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9185
9186         return rc;
9187 }
9188
9189 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9190                                    u32 cmd_flags)
9191 {
9192         int count, i, rc;
9193
9194         /* build the command word */
9195         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9196
9197         /* need to clear DONE bit separately */
9198         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9199
9200         /* write the data */
9201         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9202
9203         /* address of the NVRAM to write to */
9204         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9205                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9206
9207         /* issue the write command */
9208         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9209
9210         /* adjust timeout for emulation/FPGA */
9211         count = NVRAM_TIMEOUT_COUNT;
9212         if (CHIP_REV_IS_SLOW(bp))
9213                 count *= 100;
9214
9215         /* wait for completion */
9216         rc = -EBUSY;
9217         for (i = 0; i < count; i++) {
9218                 udelay(5);
9219                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9220                 if (val & MCPR_NVM_COMMAND_DONE) {
9221                         rc = 0;
9222                         break;
9223                 }
9224         }
9225
9226         return rc;
9227 }
9228
9229 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9230
9231 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9232                               int buf_size)
9233 {
9234         int rc;
9235         u32 cmd_flags;
9236         u32 align_offset;
9237         __be32 val;
9238
9239         if (offset + buf_size > bp->common.flash_size) {
9240                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9241                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9242                    offset, buf_size, bp->common.flash_size);
9243                 return -EINVAL;
9244         }
9245
9246         /* request access to nvram interface */
9247         rc = bnx2x_acquire_nvram_lock(bp);
9248         if (rc)
9249                 return rc;
9250
9251         /* enable access to nvram interface */
9252         bnx2x_enable_nvram_access(bp);
9253
9254         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9255         align_offset = (offset & ~0x03);
9256         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9257
9258         if (rc == 0) {
9259                 val &= ~(0xff << BYTE_OFFSET(offset));
9260                 val |= (*data_buf << BYTE_OFFSET(offset));
9261
9262                 /* nvram data is returned big-endian, as an array of
9263                  * bytes; convert it back to cpu order */
9264                 val = be32_to_cpu(val);
9265
9266                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9267                                              cmd_flags);
9268         }
9269
9270         /* disable access to nvram interface */
9271         bnx2x_disable_nvram_access(bp);
9272         bnx2x_release_nvram_lock(bp);
9273
9274         return rc;
9275 }
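/*
 * Read-modify-write example for the single-byte path above
 * (illustrative numbers): writing one byte at offset 0x102 gives
 *
 *	align_offset       = 0x102 & ~0x03 = 0x100
 *	BYTE_OFFSET(0x102) = 8 * (0x102 & 0x03) = 16
 *
 * so the dword at 0x100 is read back, bits 23:16 are replaced with the
 * new byte, and the result is written as FIRST | LAST.
 */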
9276
9277 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9278                              int buf_size)
9279 {
9280         int rc;
9281         u32 cmd_flags;
9282         u32 val;
9283         u32 written_so_far;
9284
9285         if (buf_size == 1)      /* single-byte write from ethtool */
9286                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9287
9288         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9289                 DP(BNX2X_MSG_NVM,
9290                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9291                    offset, buf_size);
9292                 return -EINVAL;
9293         }
9294
9295         if (offset + buf_size > bp->common.flash_size) {
9296                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9297                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9298                    offset, buf_size, bp->common.flash_size);
9299                 return -EINVAL;
9300         }
9301
9302         /* request access to nvram interface */
9303         rc = bnx2x_acquire_nvram_lock(bp);
9304         if (rc)
9305                 return rc;
9306
9307         /* enable access to nvram interface */
9308         bnx2x_enable_nvram_access(bp);
9309
9310         written_so_far = 0;
9311         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9312         while ((written_so_far < buf_size) && (rc == 0)) {
9313                 if (written_so_far == (buf_size - sizeof(u32)))
9314                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9315                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9316                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9317                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9318                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9319
9320                 memcpy(&val, data_buf, 4);
9321
9322                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9323
9324                 /* advance to the next dword */
9325                 offset += sizeof(u32);
9326                 data_buf += sizeof(u32);
9327                 written_so_far += sizeof(u32);
9328                 cmd_flags = 0;
9329         }
9330
9331         /* disable access to nvram interface */
9332         bnx2x_disable_nvram_access(bp);
9333         bnx2x_release_nvram_lock(bp);
9334
9335         return rc;
9336 }
9337
9338 static int bnx2x_set_eeprom(struct net_device *dev,
9339                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9340 {
9341         struct bnx2x *bp = netdev_priv(dev);
9342         int rc;
9343
9344         if (!netif_running(dev))
9345                 return -EAGAIN;
9346
9347         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9348            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9349            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9350            eeprom->len, eeprom->len);
9351
9352         /* parameters already validated in ethtool_set_eeprom */
9353
9354         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9355         if (eeprom->magic == 0x00504859)
9356                 if (bp->port.pmf) {
9357
9358                         bnx2x_acquire_phy_lock(bp);
9359                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
9360                                              bp->link_params.ext_phy_config,
9361                                              (bp->state != BNX2X_STATE_CLOSED),
9362                                              eebuf, eeprom->len);
9363                         if ((bp->state == BNX2X_STATE_OPEN) ||
9364                             (bp->state == BNX2X_STATE_DISABLED)) {
9365                                 rc |= bnx2x_link_reset(&bp->link_params,
9366                                                        &bp->link_vars, 1);
9367                                 rc |= bnx2x_phy_init(&bp->link_params,
9368                                                      &bp->link_vars);
9369                         }
9370                         bnx2x_release_phy_lock(bp);
9371
9372                 } else /* Only the PMF can access the PHY */
9373                         return -EINVAL;
9374         else
9375                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9376
9377         return rc;
9378 }
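/*
 * The magic 0x00504859 spells "PHY" in ASCII (0x50 'P', 0x48 'H',
 * 0x59 'Y') and routes ETHTOOL_SEEPROM to a PHY firmware download; the
 * driver only special-cases that value, so any other magic falls
 * through to the NVRAM write. A single-byte write would look like
 * (sketch, magic value a placeholder):
 *
 *	ethtool -E eth0 magic 0x12345678 offset 0x10 value 0xab
 *
 * and reaches bnx2x_nvram_write1() through the buf_size == 1 case.
 */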
9379
9380 static int bnx2x_get_coalesce(struct net_device *dev,
9381                               struct ethtool_coalesce *coal)
9382 {
9383         struct bnx2x *bp = netdev_priv(dev);
9384
9385         memset(coal, 0, sizeof(struct ethtool_coalesce));
9386
9387         coal->rx_coalesce_usecs = bp->rx_ticks;
9388         coal->tx_coalesce_usecs = bp->tx_ticks;
9389
9390         return 0;
9391 }
9392
9393 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9394 static int bnx2x_set_coalesce(struct net_device *dev,
9395                               struct ethtool_coalesce *coal)
9396 {
9397         struct bnx2x *bp = netdev_priv(dev);
9398
9399         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9400         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9401                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9402
9403         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9404         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9405                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9406
9407         if (netif_running(dev))
9408                 bnx2x_update_coalesce(bp);
9409
9410         return 0;
9411 }
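
/*
 * Illustrative user-space sketch, not driver code: set_coalesce above
 * silently clamps anything beyond BNX2X_MAX_COALES_TOUT instead of failing.
 * The same parameters are reachable from user space through the standard
 * SIOCETHTOOL ioctl; "eth0" below is a placeholder interface name.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Set rx/tx interrupt coalescing, as "ethtool -C eth0 rx-usecs N" would. */
static int set_coalesce_usecs(unsigned int rx_us, unsigned int tx_us)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd, rc;

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_coalesce_usecs = rx_us;	/* clamped by the driver */
	ec.tx_coalesce_usecs = tx_us;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (void *)&ec;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return rc;
}
#endif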
9412
9413 static void bnx2x_get_ringparam(struct net_device *dev,
9414                                 struct ethtool_ringparam *ering)
9415 {
9416         struct bnx2x *bp = netdev_priv(dev);
9417
9418         ering->rx_max_pending = MAX_RX_AVAIL;
9419         ering->rx_mini_max_pending = 0;
9420         ering->rx_jumbo_max_pending = 0;
9421
9422         ering->rx_pending = bp->rx_ring_size;
9423         ering->rx_mini_pending = 0;
9424         ering->rx_jumbo_pending = 0;
9425
9426         ering->tx_max_pending = MAX_TX_AVAIL;
9427         ering->tx_pending = bp->tx_ring_size;
9428 }
9429
9430 static int bnx2x_set_ringparam(struct net_device *dev,
9431                                struct ethtool_ringparam *ering)
9432 {
9433         struct bnx2x *bp = netdev_priv(dev);
9434         int rc = 0;
9435
9436         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9437             (ering->tx_pending > MAX_TX_AVAIL) ||
9438             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9439                 return -EINVAL;
9440
9441         bp->rx_ring_size = ering->rx_pending;
9442         bp->tx_ring_size = ering->tx_pending;
9443
9444         if (netif_running(dev)) {
9445                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9446                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9447         }
9448
9449         return rc;
9450 }
9451
9452 static void bnx2x_get_pauseparam(struct net_device *dev,
9453                                  struct ethtool_pauseparam *epause)
9454 {
9455         struct bnx2x *bp = netdev_priv(dev);
9456
9457         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9458                            BNX2X_FLOW_CTRL_AUTO) &&
9459                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9460
9461         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9462                             BNX2X_FLOW_CTRL_RX);
9463         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9464                             BNX2X_FLOW_CTRL_TX);
9465
9466         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9467            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9468            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9469 }
9470
9471 static int bnx2x_set_pauseparam(struct net_device *dev,
9472                                 struct ethtool_pauseparam *epause)
9473 {
9474         struct bnx2x *bp = netdev_priv(dev);
9475
9476         if (IS_E1HMF(bp))
9477                 return 0;
9478
9479         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9480            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9481            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9482
9483         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9484
9485         if (epause->rx_pause)
9486                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9487
9488         if (epause->tx_pause)
9489                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9490
9491         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9492                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9493
9494         if (epause->autoneg) {
9495                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9496                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9497                         return -EINVAL;
9498                 }
9499
9500                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9501                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9502         }
9503
9504         DP(NETIF_MSG_LINK,
9505            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9506
9507         if (netif_running(dev)) {
9508                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9509                 bnx2x_link_set(bp);
9510         }
9511
9512         return 0;
9513 }
9514
9515 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9516 {
9517         struct bnx2x *bp = netdev_priv(dev);
9518         int changed = 0;
9519         int rc = 0;
9520
9521         /* TPA requires Rx CSUM offloading */
9522         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9523                 if (!(dev->features & NETIF_F_LRO)) {
9524                         dev->features |= NETIF_F_LRO;
9525                         bp->flags |= TPA_ENABLE_FLAG;
9526                         changed = 1;
9527                 }
9528
9529         } else if (dev->features & NETIF_F_LRO) {
9530                 dev->features &= ~NETIF_F_LRO;
9531                 bp->flags &= ~TPA_ENABLE_FLAG;
9532                 changed = 1;
9533         }
9534
9535         if (changed && netif_running(dev)) {
9536                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9537                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9538         }
9539
9540         return rc;
9541 }
9542
9543 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9544 {
9545         struct bnx2x *bp = netdev_priv(dev);
9546
9547         return bp->rx_csum;
9548 }
9549
9550 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9551 {
9552         struct bnx2x *bp = netdev_priv(dev);
9553         int rc = 0;
9554
9555         bp->rx_csum = data;
9556
9557         /* Disable TPA when Rx CSUM is disabled; otherwise all
9558            TPA'ed packets would be discarded due to a wrong TCP CSUM */
9559         if (!data) {
9560                 u32 flags = ethtool_op_get_flags(dev);
9561
9562                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9563         }
9564
9565         return rc;
9566 }
9567
9568 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9569 {
9570         if (data) {
9571                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9572                 dev->features |= NETIF_F_TSO6;
9573         } else {
9574                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9575                 dev->features &= ~NETIF_F_TSO6;
9576         }
9577
9578         return 0;
9579 }
9580
9581 static const struct {
9582         char string[ETH_GSTRING_LEN];
9583 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9584         { "register_test (offline)" },
9585         { "memory_test (offline)" },
9586         { "loopback_test (offline)" },
9587         { "nvram_test (online)" },
9588         { "interrupt_test (online)" },
9589         { "link_test (online)" },
9590         { "idle check (online)" }
9591 };
9592
9593 static int bnx2x_self_test_count(struct net_device *dev)
9594 {
9595         return BNX2X_NUM_TESTS;
9596 }
9597
9598 static int bnx2x_test_registers(struct bnx2x *bp)
9599 {
9600         int idx, i, rc = -ENODEV;
9601         u32 wr_val = 0;
9602         int port = BP_PORT(bp);
9603         static const struct {
9604                 u32  offset0;
9605                 u32  offset1;
9606                 u32  mask;
9607         } reg_tbl[] = {
9608 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9609                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9610                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9611                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9612                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9613                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9614                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9615                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9616                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9617                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9618 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9619                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9620                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9621                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9622                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9623                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9624                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9625                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9626                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9627                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9628 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9629                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9630                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9631                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9632                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9633                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9634                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9635                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9636                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9637                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9638 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9639                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9640                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9641                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9642                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9643                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9644                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9645
9646                 { 0xffffffff, 0, 0x00000000 }
9647         };
9648
9649         if (!netif_running(bp->dev))
9650                 return rc;
9651
9652         /* Repeat the test twice:
9653            first writing 0x00000000, then writing 0xffffffff */
9654         for (idx = 0; idx < 2; idx++) {
9655
9656                 switch (idx) {
9657                 case 0:
9658                         wr_val = 0;
9659                         break;
9660                 case 1:
9661                         wr_val = 0xffffffff;
9662                         break;
9663                 }
9664
9665                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9666                         u32 offset, mask, save_val, val;
9667
9668                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9669                         mask = reg_tbl[i].mask;
9670
9671                         save_val = REG_RD(bp, offset);
9672
9673                         REG_WR(bp, offset, wr_val);
9674                         val = REG_RD(bp, offset);
9675
9676                         /* Restore the original register's value */
9677                         REG_WR(bp, offset, save_val);
9678
9679                         /* verify the value matches the expected one */
9680                         if ((val & mask) != (wr_val & mask))
9681                                 goto test_reg_exit;
9682                 }
9683         }
9684
9685         rc = 0;
9686
9687 test_reg_exit:
9688         return rc;
9689 }
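
/*
 * Illustrative sketch, not part of the driver: the table-driven test above
 * writes 0x00000000 and then 0xffffffff to every register, reads the value
 * back, restores the original contents, and compares only through the mask
 * of implemented bits.  The snippet below isolates that write/read-back/
 * restore idiom; reg_rd()/reg_wr() are assumed accessors standing in for
 * REG_RD()/REG_WR().
 */
#if 0
#include <stdint.h>

struct reg_case {
	uint32_t offset;	/* base register address */
	uint32_t stride;	/* per-port step, like offset1 above */
	uint32_t mask;		/* implemented bits */
};

extern uint32_t reg_rd(uint32_t off);		/* assumed accessors */
extern void reg_wr(uint32_t off, uint32_t val);

static int test_one_reg(const struct reg_case *r, int port, uint32_t wr_val)
{
	uint32_t off = r->offset + port * r->stride;
	uint32_t saved = reg_rd(off);
	uint32_t val;

	reg_wr(off, wr_val);
	val = reg_rd(off);
	reg_wr(off, saved);		/* always restore the register */

	/* only implemented bits must echo the pattern back */
	return ((val & r->mask) == (wr_val & r->mask)) ? 0 : -1;
}
#endif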
9690
9691 static int bnx2x_test_memory(struct bnx2x *bp)
9692 {
9693         int i, j, rc = -ENODEV;
9694         u32 val;
9695         static const struct {
9696                 u32 offset;
9697                 int size;
9698         } mem_tbl[] = {
9699                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9700                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9701                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9702                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9703                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9704                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9705                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9706
9707                 { 0xffffffff, 0 }
9708         };
9709         static const struct {
9710                 char *name;
9711                 u32 offset;
9712                 u32 e1_mask;
9713                 u32 e1h_mask;
9714         } prty_tbl[] = {
9715                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9716                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9717                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9718                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9719                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9720                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9721
9722                 { NULL, 0xffffffff, 0, 0 }
9723         };
9724
9725         if (!netif_running(bp->dev))
9726                 return rc;
9727
9728         /* Go through all the memories */
9729         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9730                 for (j = 0; j < mem_tbl[i].size; j++)
9731                         REG_RD(bp, mem_tbl[i].offset + j*4);
9732
9733         /* Check the parity status */
9734         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9735                 val = REG_RD(bp, prty_tbl[i].offset);
9736                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9737                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9738                         DP(NETIF_MSG_HW,
9739                            "%s is 0x%x\n", prty_tbl[i].name, val);
9740                         goto test_mem_exit;
9741                 }
9742         }
9743
9744         rc = 0;
9745
9746 test_mem_exit:
9747         return rc;
9748 }
9749
9750 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9751 {
9752         int cnt = 1000;
9753
9754         if (link_up)
9755                 while (bnx2x_link_test(bp) && cnt--)
9756                         msleep(10);
9757 }
9758
9759 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9760 {
9761         unsigned int pkt_size, num_pkts, i;
9762         struct sk_buff *skb;
9763         unsigned char *packet;
9764         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9765         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9766         u16 tx_start_idx, tx_idx;
9767         u16 rx_start_idx, rx_idx;
9768         u16 pkt_prod, bd_prod;
9769         struct sw_tx_bd *tx_buf;
9770         struct eth_tx_start_bd *tx_start_bd;
9771         struct eth_tx_parse_bd *pbd = NULL;
9772         dma_addr_t mapping;
9773         union eth_rx_cqe *cqe;
9774         u8 cqe_fp_flags;
9775         struct sw_rx_bd *rx_buf;
9776         u16 len;
9777         int rc = -ENODEV;
9778
9779         /* check the loopback mode */
9780         switch (loopback_mode) {
9781         case BNX2X_PHY_LOOPBACK:
9782                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9783                         return -EINVAL;
9784                 break;
9785         case BNX2X_MAC_LOOPBACK:
9786                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9787                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9788                 break;
9789         default:
9790                 return -EINVAL;
9791         }
9792
9793         /* prepare the loopback packet */
9794         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9795                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9796         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9797         if (!skb) {
9798                 rc = -ENOMEM;
9799                 goto test_loopback_exit;
9800         }
9801         packet = skb_put(skb, pkt_size);
9802         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9803         memset(packet + ETH_ALEN, 0, ETH_ALEN);
9804         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9805         for (i = ETH_HLEN; i < pkt_size; i++)
9806                 packet[i] = (unsigned char) (i & 0xff);
9807
9808         /* send the loopback packet */
9809         num_pkts = 0;
9810         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9811         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9812
9813         pkt_prod = fp_tx->tx_pkt_prod++;
9814         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9815         tx_buf->first_bd = fp_tx->tx_bd_prod;
9816         tx_buf->skb = skb;
9817         tx_buf->flags = 0;
9818
9819         bd_prod = TX_BD(fp_tx->tx_bd_prod);
9820         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9821         mapping = pci_map_single(bp->pdev, skb->data,
9822                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9823         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9824         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9825         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9826         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9827         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9828         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9829         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9830                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9831
9832         /* turn on parsing and get a BD */
9833         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9834         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9835
9836         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9837
9838         wmb();
9839
9840         fp_tx->tx_db.data.prod += 2;
9841         barrier();
9842         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
9843
9844         mmiowb();
9845
9846         num_pkts++;
9847         fp_tx->tx_bd_prod += 2; /* start + pbd */
9848         bp->dev->trans_start = jiffies;
9849
9850         udelay(100);
9851
9852         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9853         if (tx_idx != tx_start_idx + num_pkts)
9854                 goto test_loopback_exit;
9855
9856         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9857         if (rx_idx != rx_start_idx + num_pkts)
9858                 goto test_loopback_exit;
9859
9860         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
9861         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9862         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9863                 goto test_loopback_rx_exit;
9864
9865         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9866         if (len != pkt_size)
9867                 goto test_loopback_rx_exit;
9868
9869         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9870         skb = rx_buf->skb;
9871         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9872         for (i = ETH_HLEN; i < pkt_size; i++)
9873                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9874                         goto test_loopback_rx_exit;
9875
9876         rc = 0;
9877
9878 test_loopback_rx_exit:
9879
9880         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9881         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9882         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9883         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
9884
9885         /* Update producers */
9886         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9887                              fp_rx->rx_sge_prod);
9888
9889 test_loopback_exit:
9890         bp->link_params.loopback_mode = LOOPBACK_NONE;
9891
9892         return rc;
9893 }
9894
9895 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9896 {
9897         int rc = 0, res;
9898
9899         if (!netif_running(bp->dev))
9900                 return BNX2X_LOOPBACK_FAILED;
9901
9902         bnx2x_netif_stop(bp, 1);
9903         bnx2x_acquire_phy_lock(bp);
9904
9905         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9906         if (res) {
9907                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9908                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9909         }
9910
9911         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9912         if (res) {
9913                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9914                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9915         }
9916
9917         bnx2x_release_phy_lock(bp);
9918         bnx2x_netif_start(bp);
9919
9920         return rc;
9921 }
9922
9923 #define CRC32_RESIDUAL                  0xdebb20e3
9924
9925 static int bnx2x_test_nvram(struct bnx2x *bp)
9926 {
9927         static const struct {
9928                 int offset;
9929                 int size;
9930         } nvram_tbl[] = {
9931                 {     0,  0x14 }, /* bootstrap */
9932                 {  0x14,  0xec }, /* dir */
9933                 { 0x100, 0x350 }, /* manuf_info */
9934                 { 0x450,  0xf0 }, /* feature_info */
9935                 { 0x640,  0x64 }, /* upgrade_key_info */
9936                 { 0x6a4,  0x64 },
9937                 { 0x708,  0x70 }, /* manuf_key_info */
9938                 { 0x778,  0x70 },
9939                 {     0,     0 }
9940         };
9941         __be32 buf[0x350 / 4];
9942         u8 *data = (u8 *)buf;
9943         int i, rc;
9944         u32 magic, csum;
9945
9946         rc = bnx2x_nvram_read(bp, 0, data, 4);
9947         if (rc) {
9948                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9949                 goto test_nvram_exit;
9950         }
9951
9952         magic = be32_to_cpu(buf[0]);
9953         if (magic != 0x669955aa) {
9954                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9955                 rc = -ENODEV;
9956                 goto test_nvram_exit;
9957         }
9958
9959         for (i = 0; nvram_tbl[i].size; i++) {
9960
9961                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9962                                       nvram_tbl[i].size);
9963                 if (rc) {
9964                         DP(NETIF_MSG_PROBE,
9965                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9966                         goto test_nvram_exit;
9967                 }
9968
9969                 csum = ether_crc_le(nvram_tbl[i].size, data);
9970                 if (csum != CRC32_RESIDUAL) {
9971                         DP(NETIF_MSG_PROBE,
9972                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9973                         rc = -ENODEV;
9974                         goto test_nvram_exit;
9975                 }
9976         }
9977
9978 test_nvram_exit:
9979         return rc;
9980 }
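
/*
 * Illustrative user-space demo, not driver code: every region listed in
 * nvram_tbl[] carries its own CRC32 in the final dword, so ether_crc_le()
 * over data-plus-stored-CRC always yields the fixed CRC-32 residue
 * 0xdebb20e3, whatever the contents.  zlib's crc32() applies the final bit
 * inversion that ether_crc_le() omits, so the same property shows up there
 * as the complementary constant 0x2144df1c (build with -lz).
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <zlib.h>

int main(void)
{
	unsigned char buf[16] = "arbitrary data";  /* first 12 bytes used */
	uint32_t crc = crc32(0L, buf, 12);

	/* append the CRC little-endian, the way it is stored in NVRAM */
	buf[12] = crc & 0xff;
	buf[13] = (crc >> 8) & 0xff;
	buf[14] = (crc >> 16) & 0xff;
	buf[15] = (crc >> 24) & 0xff;

	/* data + stored CRC always checks to the same constant */
	assert(crc32(0L, buf, 16) == 0x2144df1c);
	/* without zlib's final inversion this is ether_crc_le()'s residue */
	assert((~crc32(0L, buf, 16) & 0xffffffff) == 0xdebb20e3);
	return 0;
}
#endif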
9981
9982 static int bnx2x_test_intr(struct bnx2x *bp)
9983 {
9984         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9985         int i, rc;
9986
9987         if (!netif_running(bp->dev))
9988                 return -ENODEV;
9989
9990         config->hdr.length = 0;
9991         if (CHIP_IS_E1(bp))
9992                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9993         else
9994                 config->hdr.offset = BP_FUNC(bp);
9995         config->hdr.client_id = bp->fp->cl_id;
9996         config->hdr.reserved1 = 0;
9997
9998         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9999                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10000                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10001         if (rc == 0) {
10002                 bp->set_mac_pending++;
10003                 for (i = 0; i < 10; i++) {
10004                         if (!bp->set_mac_pending)
10005                                 break;
10006                         msleep_interruptible(10);
10007                 }
10008                 if (i == 10)
10009                         rc = -ENODEV;
10010         }
10011
10012         return rc;
10013 }
10014
10015 static void bnx2x_self_test(struct net_device *dev,
10016                             struct ethtool_test *etest, u64 *buf)
10017 {
10018         struct bnx2x *bp = netdev_priv(dev);
10019
10020         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10021
10022         if (!netif_running(dev))
10023                 return;
10024
10025         /* offline tests are not supported in MF mode */
10026         if (IS_E1HMF(bp))
10027                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10028
10029         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10030                 int port = BP_PORT(bp);
10031                 u32 val;
10032                 u8 link_up;
10033
10034                 /* save current value of input enable for TX port IF */
10035                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10036                 /* disable input for TX port IF */
10037                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10038
10039                 link_up = bp->link_vars.link_up;
10040                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10041                 bnx2x_nic_load(bp, LOAD_DIAG);
10042                 /* wait until link state is restored */
10043                 bnx2x_wait_for_link(bp, link_up);
10044
10045                 if (bnx2x_test_registers(bp) != 0) {
10046                         buf[0] = 1;
10047                         etest->flags |= ETH_TEST_FL_FAILED;
10048                 }
10049                 if (bnx2x_test_memory(bp) != 0) {
10050                         buf[1] = 1;
10051                         etest->flags |= ETH_TEST_FL_FAILED;
10052                 }
10053                 buf[2] = bnx2x_test_loopback(bp, link_up);
10054                 if (buf[2] != 0)
10055                         etest->flags |= ETH_TEST_FL_FAILED;
10056
10057                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10058
10059                 /* restore input for TX port IF */
10060                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10061
10062                 bnx2x_nic_load(bp, LOAD_NORMAL);
10063                 /* wait until link state is restored */
10064                 bnx2x_wait_for_link(bp, link_up);
10065         }
10066         if (bnx2x_test_nvram(bp) != 0) {
10067                 buf[3] = 1;
10068                 etest->flags |= ETH_TEST_FL_FAILED;
10069         }
10070         if (bnx2x_test_intr(bp) != 0) {
10071                 buf[4] = 1;
10072                 etest->flags |= ETH_TEST_FL_FAILED;
10073         }
10074         if (bp->port.pmf)
10075                 if (bnx2x_link_test(bp) != 0) {
10076                         buf[5] = 1;
10077                         etest->flags |= ETH_TEST_FL_FAILED;
10078                 }
10079
10080 #ifdef BNX2X_EXTRA_DEBUG
10081         bnx2x_panic_dump(bp);
10082 #endif
10083 }
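
/*
 * Illustrative user-space sketch, not driver code: the sequence above is
 * normally driven by "ethtool -t <dev> offline", which issues ETHTOOL_TEST;
 * the returned u64 array lines up index-for-index with bnx2x_tests_str_arr.
 * NUM_TESTS below mirrors that 7-entry table; ifname is caller supplied.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

#define NUM_TESTS 7	/* matches bnx2x_tests_str_arr above */

static int run_self_test(const char *ifname, int offline)
{
	struct ethtool_test *t;
	struct ifreq ifr;
	int fd, rc, i, failed = 0;

	t = calloc(1, sizeof(*t) + NUM_TESTS * sizeof(__u64));
	if (!t)
		return -1;
	t->cmd = ETHTOOL_TEST;
	t->flags = offline ? ETH_TEST_FL_OFFLINE : 0;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)t;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	rc = (fd < 0) ? -1 : ioctl(fd, SIOCETHTOOL, &ifr);
	if (rc == 0) {
		failed = !!(t->flags & ETH_TEST_FL_FAILED);
		for (i = 0; i < NUM_TESTS; i++)
			printf("test %d: %llu\n", i,
			       (unsigned long long)t->data[i]);
	}
	if (fd >= 0)
		close(fd);
	free(t);
	return rc ? rc : failed;
}
#endif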
10084
10085 static const struct {
10086         long offset;
10087         int size;
10088         u8 string[ETH_GSTRING_LEN];
10089 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10090 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10091         { Q_STATS_OFFSET32(error_bytes_received_hi),
10092                                                 8, "[%d]: rx_error_bytes" },
10093         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10094                                                 8, "[%d]: rx_ucast_packets" },
10095         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10096                                                 8, "[%d]: rx_mcast_packets" },
10097         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10098                                                 8, "[%d]: rx_bcast_packets" },
10099         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10100         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10101                                          4, "[%d]: rx_phy_ip_err_discards"},
10102         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10103                                          4, "[%d]: rx_skb_alloc_discard" },
10104         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10105
10106 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10107         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10108                                                         8, "[%d]: tx_packets" }
10109 };
10110
10111 static const struct {
10112         long offset;
10113         int size;
10114         u32 flags;
10115 #define STATS_FLAGS_PORT                1
10116 #define STATS_FLAGS_FUNC                2
10117 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10118         u8 string[ETH_GSTRING_LEN];
10119 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10120 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10121                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10122         { STATS_OFFSET32(error_bytes_received_hi),
10123                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10124         { STATS_OFFSET32(total_unicast_packets_received_hi),
10125                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10126         { STATS_OFFSET32(total_multicast_packets_received_hi),
10127                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10128         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10129                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10130         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10131                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10132         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10133                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10134         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10135                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10136         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10137                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10138 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10139                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10140         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10141                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10142         { STATS_OFFSET32(no_buff_discard_hi),
10143                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10144         { STATS_OFFSET32(mac_filter_discard),
10145                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10146         { STATS_OFFSET32(xxoverflow_discard),
10147                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10148         { STATS_OFFSET32(brb_drop_hi),
10149                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10150         { STATS_OFFSET32(brb_truncate_hi),
10151                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10152         { STATS_OFFSET32(pause_frames_received_hi),
10153                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10154         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10155                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10156         { STATS_OFFSET32(nig_timer_max),
10157                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10158 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10159                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10160         { STATS_OFFSET32(rx_skb_alloc_failed),
10161                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10162         { STATS_OFFSET32(hw_csum_err),
10163                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10164
10165         { STATS_OFFSET32(total_bytes_transmitted_hi),
10166                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10167         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10168                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10169         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10170                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10171         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10172                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10173         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10174                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10175         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10176                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10177         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10178                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10179 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10180                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10181         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10182                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10183         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10184                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10185         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10186                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10187         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10188                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10189         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10190                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10191         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10192                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10193         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10194                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10195         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10196                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10197         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10198                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10199 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10200                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10201         { STATS_OFFSET32(pause_frames_sent_hi),
10202                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10203 };
10204
10205 #define IS_PORT_STAT(i) \
10206         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10207 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10208 #define IS_E1HMF_MODE_STAT(bp) \
10209                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10210
10211 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10212 {
10213         struct bnx2x *bp = netdev_priv(dev);
10214         int i, j, k;
10215
10216         switch (stringset) {
10217         case ETH_SS_STATS:
10218                 if (is_multi(bp)) {
10219                         k = 0;
10220                         for_each_rx_queue(bp, i) {
10221                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10222                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10223                                                 bnx2x_q_stats_arr[j].string, i);
10224                                 k += BNX2X_NUM_Q_STATS;
10225                         }
10226                         if (IS_E1HMF_MODE_STAT(bp))
10227                                 break;
10228                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10229                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10230                                        bnx2x_stats_arr[j].string);
10231                 } else {
10232                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10233                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10234                                         continue;
10235                                 strcpy(buf + j*ETH_GSTRING_LEN,
10236                                        bnx2x_stats_arr[i].string);
10237                                 j++;
10238                         }
10239                 }
10240                 break;
10241
10242         case ETH_SS_TEST:
10243                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10244                 break;
10245         }
10246 }
10247
10248 static int bnx2x_get_stats_count(struct net_device *dev)
10249 {
10250         struct bnx2x *bp = netdev_priv(dev);
10251         int i, num_stats;
10252
10253         if (is_multi(bp)) {
10254                 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10255                 if (!IS_E1HMF_MODE_STAT(bp))
10256                         num_stats += BNX2X_NUM_STATS;
10257         } else {
10258                 if (IS_E1HMF_MODE_STAT(bp)) {
10259                         num_stats = 0;
10260                         for (i = 0; i < BNX2X_NUM_STATS; i++)
10261                                 if (IS_FUNC_STAT(i))
10262                                         num_stats++;
10263                 } else
10264                         num_stats = BNX2X_NUM_STATS;
10265         }
10266
10267         return num_stats;
10268 }
10269
10270 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10271                                     struct ethtool_stats *stats, u64 *buf)
10272 {
10273         struct bnx2x *bp = netdev_priv(dev);
10274         u32 *hw_stats, *offset;
10275         int i, j, k;
10276
10277         if (is_multi(bp)) {
10278                 k = 0;
10279                 for_each_rx_queue(bp, i) {
10280                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10281                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10282                                 if (bnx2x_q_stats_arr[j].size == 0) {
10283                                         /* skip this counter */
10284                                         buf[k + j] = 0;
10285                                         continue;
10286                                 }
10287                                 offset = (hw_stats +
10288                                           bnx2x_q_stats_arr[j].offset);
10289                                 if (bnx2x_q_stats_arr[j].size == 4) {
10290                                         /* 4-byte counter */
10291                                         buf[k + j] = (u64) *offset;
10292                                         continue;
10293                                 }
10294                                 /* 8-byte counter */
10295                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10296                         }
10297                         k += BNX2X_NUM_Q_STATS;
10298                 }
10299                 if (IS_E1HMF_MODE_STAT(bp))
10300                         return;
10301                 hw_stats = (u32 *)&bp->eth_stats;
10302                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10303                         if (bnx2x_stats_arr[j].size == 0) {
10304                                 /* skip this counter */
10305                                 buf[k + j] = 0;
10306                                 continue;
10307                         }
10308                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10309                         if (bnx2x_stats_arr[j].size == 4) {
10310                                 /* 4-byte counter */
10311                                 buf[k + j] = (u64) *offset;
10312                                 continue;
10313                         }
10314                         /* 8-byte counter */
10315                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10316                 }
10317         } else {
10318                 hw_stats = (u32 *)&bp->eth_stats;
10319                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10320                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10321                                 continue;
10322                         if (bnx2x_stats_arr[i].size == 0) {
10323                                 /* skip this counter */
10324                                 buf[j] = 0;
10325                                 j++;
10326                                 continue;
10327                         }
10328                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10329                         if (bnx2x_stats_arr[i].size == 4) {
10330                                 /* 4-byte counter */
10331                                 buf[j] = (u64) *offset;
10332                                 j++;
10333                                 continue;
10334                         }
10335                         /* 8-byte counter */
10336                         buf[j] = HILO_U64(*offset, *(offset + 1));
10337                         j++;
10338                 }
10339         }
10340 }
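
/*
 * Illustrative sketch, not part of the driver: the firmware keeps 64-bit
 * counters as two adjacent 32-bit words with the high word first (hence
 * the *_hi field names), so 8-byte entries above are read via HILO_U64 on
 * two consecutive u32 slots.  The combining step is essentially this:
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* the hi word sits at the lower offset, e.g. total_bytes_received_hi */
static inline uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t words[2] = { 0x00000001, 0x80000000 };	/* hi, lo */

	assert(hilo_u64(words[0], words[1]) == 0x180000000ULL);
	return 0;
}
#endif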
10341
10342 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10343 {
10344         struct bnx2x *bp = netdev_priv(dev);
10345         int port = BP_PORT(bp);
10346         int i;
10347
10348         if (!netif_running(dev))
10349                 return 0;
10350
10351         if (!bp->port.pmf)
10352                 return 0;
10353
10354         if (data == 0)
10355                 data = 2;
10356
10357         for (i = 0; i < (data * 2); i++) {
10358                 if ((i % 2) == 0)
10359                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10360                                       bp->link_params.hw_led_mode,
10361                                       bp->link_params.chip_id);
10362                 else
10363                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10364                                       bp->link_params.hw_led_mode,
10365                                       bp->link_params.chip_id);
10366
10367                 msleep_interruptible(500);
10368                 if (signal_pending(current))
10369                         break;
10370         }
10371
10372         if (bp->link_vars.link_up)
10373                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10374                               bp->link_vars.line_speed,
10375                               bp->link_params.hw_led_mode,
10376                               bp->link_params.chip_id);
10377
10378         return 0;
10379 }
10380
10381 static struct ethtool_ops bnx2x_ethtool_ops = {
10382         .get_settings           = bnx2x_get_settings,
10383         .set_settings           = bnx2x_set_settings,
10384         .get_drvinfo            = bnx2x_get_drvinfo,
10385         .get_regs_len           = bnx2x_get_regs_len,
10386         .get_regs               = bnx2x_get_regs,
10387         .get_wol                = bnx2x_get_wol,
10388         .set_wol                = bnx2x_set_wol,
10389         .get_msglevel           = bnx2x_get_msglevel,
10390         .set_msglevel           = bnx2x_set_msglevel,
10391         .nway_reset             = bnx2x_nway_reset,
10392         .get_link               = bnx2x_get_link,
10393         .get_eeprom_len         = bnx2x_get_eeprom_len,
10394         .get_eeprom             = bnx2x_get_eeprom,
10395         .set_eeprom             = bnx2x_set_eeprom,
10396         .get_coalesce           = bnx2x_get_coalesce,
10397         .set_coalesce           = bnx2x_set_coalesce,
10398         .get_ringparam          = bnx2x_get_ringparam,
10399         .set_ringparam          = bnx2x_set_ringparam,
10400         .get_pauseparam         = bnx2x_get_pauseparam,
10401         .set_pauseparam         = bnx2x_set_pauseparam,
10402         .get_rx_csum            = bnx2x_get_rx_csum,
10403         .set_rx_csum            = bnx2x_set_rx_csum,
10404         .get_tx_csum            = ethtool_op_get_tx_csum,
10405         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10406         .set_flags              = bnx2x_set_flags,
10407         .get_flags              = ethtool_op_get_flags,
10408         .get_sg                 = ethtool_op_get_sg,
10409         .set_sg                 = ethtool_op_set_sg,
10410         .get_tso                = ethtool_op_get_tso,
10411         .set_tso                = bnx2x_set_tso,
10412         .self_test_count        = bnx2x_self_test_count,
10413         .self_test              = bnx2x_self_test,
10414         .get_strings            = bnx2x_get_strings,
10415         .phys_id                = bnx2x_phys_id,
10416         .get_stats_count        = bnx2x_get_stats_count,
10417         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10418 };
10419
10420 /* end of ethtool_ops */
10421
10422 /****************************************************************************
10423 * General service functions
10424 ****************************************************************************/
10425
10426 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10427 {
10428         u16 pmcsr;
10429
10430         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10431
10432         switch (state) {
10433         case PCI_D0:
10434                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10435                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10436                                        PCI_PM_CTRL_PME_STATUS));
10437
10438                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10439                         /* delay required during transition out of D3hot */
10440                         msleep(20);
10441                 break;
10442
10443         case PCI_D3hot:
10444                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10445                 pmcsr |= 3;
10446
10447                 if (bp->wol)
10448                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10449
10450                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10451                                       pmcsr);
10452
10453         /* No more memory access after this point until
10454          * the device is brought back to D0.
10455          */
10456                 break;
10457
10458         default:
10459                 return -EINVAL;
10460         }
10461         return 0;
10462 }
10463
10464 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10465 {
10466         u16 rx_cons_sb;
10467
10468         /* Tell compiler that status block fields can change */
10469         /* Tell the compiler that status block fields can change */
10470         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10471         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10472                 rx_cons_sb++;
10473         return (fp->rx_comp_cons != rx_cons_sb);
10474 }
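
/*
 * Illustrative sketch, not part of the driver: the last descriptor of each
 * RCQ page is a next-page pointer rather than a real completion, so a
 * consumer value whose low bits land on MAX_RCQ_DESC_CNT is stepped past
 * it before the comparison above.  DESC_PER_PAGE below is an assumed
 * power-of-two page capacity, not the real driver constant.
 */
#if 0
#include <stdint.h>

#define DESC_PER_PAGE	128u			/* assumed, power of two */
#define MAX_DESC_CNT	(DESC_PER_PAGE - 1)	/* last slot = page link */

/* A consumer index whose low bits hit the last slot of a page points at
 * the next-page pointer, not at a completion entry -- step over it. */
static inline uint16_t adjust_cons(uint16_t cons)
{
	if ((cons & MAX_DESC_CNT) == MAX_DESC_CNT)
		cons++;
	return cons;
}
#endif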
10475
10476 /*
10477  * net_device service functions
10478  */
10479
10480 static int bnx2x_poll(struct napi_struct *napi, int budget)
10481 {
10482         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10483                                                  napi);
10484         struct bnx2x *bp = fp->bp;
10485         int work_done = 0;
10486
10487 #ifdef BNX2X_STOP_ON_ERROR
10488         if (unlikely(bp->panic))
10489                 goto poll_panic;
10490 #endif
10491
10492         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10493         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10494
10495         bnx2x_update_fpsb_idx(fp);
10496
10497         if (bnx2x_has_rx_work(fp)) {
10498                 work_done = bnx2x_rx_int(fp, budget);
10499
10500                 /* must not complete if we consumed full budget */
10501                 if (work_done >= budget)
10502                         goto poll_again;
10503         }
10504
10505         /* bnx2x_has_rx_work() reads the status block, thus we need to
10506          * ensure that status block indices have been actually read
10507          * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10508          * so that we won't write the "newer" value of the status block to IGU
10509          * (if there was a DMA right after bnx2x_has_rx_work and
10510          * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10511          * may be postponed to right before bnx2x_ack_sb). In this case
10512          * there will never be another interrupt until there is another update
10513          * of the status block, while there is still unhandled work.
10514          */
10515         rmb();
10516
10517         if (!bnx2x_has_rx_work(fp)) {
10518 #ifdef BNX2X_STOP_ON_ERROR
10519 poll_panic:
10520 #endif
10521                 napi_complete(napi);
10522
10523                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10524                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10525                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10526                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10527         }
10528
10529 poll_again:
10530         return work_done;
10531 }
10532
10533
10534 /* We split the first BD into a headers BD and a data BD
10535  * to ease the pain of our fellow microcode engineers;
10536  * we use one DMA mapping for both BDs.
10537  * So far this has only been observed to happen
10538  * in Other Operating Systems(TM).
10539  */
10540 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10541                                    struct bnx2x_fastpath *fp,
10542                                    struct sw_tx_bd *tx_buf,
10543                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10544                                    u16 bd_prod, int nbd)
10545 {
10546         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10547         struct eth_tx_bd *d_tx_bd;
10548         dma_addr_t mapping;
10549         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10550
10551         /* first fix first BD */
10552         h_tx_bd->nbd = cpu_to_le16(nbd);
10553         h_tx_bd->nbytes = cpu_to_le16(hlen);
10554
10555         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10556            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10557            h_tx_bd->addr_lo, h_tx_bd->nbd);
10558
10559         /* now get a new data BD
10560          * (after the pbd) and fill it */
10561         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10562         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10563
10564         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10565                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10566
10567         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10568         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10569         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10570
10571         /* this marks the BD as one that has no individual mapping */
10572         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10573
10574         DP(NETIF_MSG_TX_QUEUED,
10575            "TSO split data size is %d (%x:%x)\n",
10576            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10577
10578         /* update tx_bd */
10579         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10580
10581         return bd_prod;
10582 }
10583
10584 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10585 {
10586         if (fix > 0)
10587                 csum = (u16) ~csum_fold(csum_sub(csum,
10588                                 csum_partial(t_header - fix, fix, 0)));
10589
10590         else if (fix < 0)
10591                 csum = (u16) ~csum_fold(csum_add(csum,
10592                                 csum_partial(t_header, -fix, 0)));
10593
10594         return swab16(csum);
10595 }
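
/*
 * Illustrative user-space sketch, not driver code: when the hardware has
 * summed from a point `fix` bytes away from the true transport header, the
 * helper above corrects the 32-bit partial sum by subtracting (fix > 0) or
 * adding (fix < 0) the partial sum of the skipped bytes, then folds to 16
 * bits and byte-swaps.  The toy below re-implements the ones-complement
 * arithmetic to show why subtracting a prefix sum leaves exactly the sum
 * of the remainder; in the kernel this is csum_partial()/csum_fold().
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* 32-bit ones-complement accumulation over a buffer, folded to 16 bits
 * (even lengths only, to keep the sketch short) */
static uint32_t sum16(const uint8_t *p, size_t len)
{
	uint32_t s = 0;

	while (len >= 2) {
		s += (uint32_t)(p[0] | (p[1] << 8));
		p += 2;
		len -= 2;
	}
	while (s >> 16)
		s = (s & 0xffff) + (s >> 16);	/* fold the carries */
	return s;
}

int main(void)
{
	uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	int fix = 4;

	/* ones-complement subtract: a - b == a + ~b (mod 2^16 - 1) */
	uint32_t whole = sum16(pkt, sizeof(pkt));
	uint32_t prefix = sum16(pkt, fix);
	uint32_t diff = whole + (~prefix & 0xffff);

	while (diff >> 16)
		diff = (diff & 0xffff) + (diff >> 16);

	/* removing the prefix sum leaves the sum over pkt + fix, which is
	 * what the fix > 0 branch of bnx2x_csum_fix() relies on */
	assert(diff == sum16(pkt + fix, sizeof(pkt) - fix));
	return 0;
}
#endif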
10596
10597 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10598 {
10599         u32 rc;
10600
10601         if (skb->ip_summed != CHECKSUM_PARTIAL)
10602                 rc = XMIT_PLAIN;
10603
10604         else {
10605                 if (skb->protocol == htons(ETH_P_IPV6)) {
10606                         rc = XMIT_CSUM_V6;
10607                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10608                                 rc |= XMIT_CSUM_TCP;
10609
10610                 } else {
10611                         rc = XMIT_CSUM_V4;
10612                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10613                                 rc |= XMIT_CSUM_TCP;
10614                 }
10615         }
10616
10617         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10618                 rc |= XMIT_GSO_V4;
10619
10620         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10621                 rc |= XMIT_GSO_V6;
10622
10623         return rc;
10624 }
10625
10626 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10627 /* Check if the packet requires linearization (i.e. is too fragmented).
10628    There is no need to check fragmentation if the page size > 8K (there
10629    can be no violation of the FW restrictions in that case) */
10630 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10631                              u32 xmit_type)
10632 {
10633         int to_copy = 0;
10634         int hlen = 0;
10635         int first_bd_sz = 0;
10636
10637         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10638         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10639
10640                 if (xmit_type & XMIT_GSO) {
10641                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10642                         /* Check if LSO packet needs to be copied:
10643                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10644                         int wnd_size = MAX_FETCH_BD - 3;
10645                         /* Number of windows to check */
10646                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10647                         int wnd_idx = 0;
10648                         int frag_idx = 0;
10649                         u32 wnd_sum = 0;
10650
10651                         /* Headers length */
10652                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10653                                 tcp_hdrlen(skb);
10654
10655                         /* Amount of data (w/o headers) on linear part of SKB */
10656                         first_bd_sz = skb_headlen(skb) - hlen;
10657
10658                         wnd_sum  = first_bd_sz;
10659
10660                         /* Calculate the first sum - it's special */
10661                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10662                                 wnd_sum +=
10663                                         skb_shinfo(skb)->frags[frag_idx].size;
10664
10665                         /* If there was data on linear skb data - check it */
10666                         if (first_bd_sz > 0) {
10667                                 if (unlikely(wnd_sum < lso_mss)) {
10668                                         to_copy = 1;
10669                                         goto exit_lbl;
10670                                 }
10671
10672                                 wnd_sum -= first_bd_sz;
10673                         }
10674
10675                         /* Others are easier: run through the frag list and
10676                            check all windows */
10677                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10678                                 wnd_sum +=
10679                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10680
10681                                 if (unlikely(wnd_sum < lso_mss)) {
10682                                         to_copy = 1;
10683                                         break;
10684                                 }
10685                                 wnd_sum -=
10686                                         skb_shinfo(skb)->frags[wnd_idx].size;
10687                         }
10688                 } else {
10689                         /* in the non-LSO case, a packet this
10690                            fragmented should always be linearized */
10691                         to_copy = 1;
10692                 }
10693         }
10694
10695 exit_lbl:
10696         if (unlikely(to_copy))
10697                 DP(NETIF_MSG_TX_QUEUED,
10698                    "Linearization IS REQUIRED for %s packet. "
10699                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10700                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10701                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10702
10703         return to_copy;
10704 }
10705 #endif
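/*
 * Editorial sketch (not part of the driver): the LSO window check above,
 * reduced to a self-contained loop over an array of fragment sizes.  All
 * demo_* names are hypothetical; the driver additionally treats the linear
 * part of the skb as "fragment 0" and subtracts the headers from it.
 */
#if 0 /* illustration only, never compiled */
static int demo_needs_linearization(const unsigned int *frag_sz, int nfrags,
                                    int wnd_size, unsigned int lso_mss)
{
        unsigned int wnd_sum = 0;
        int i;

        if (nfrags < wnd_size)
                return 0;       /* few enough BDs - FW limit cannot be hit */

        for (i = 0; i < wnd_size; i++)
                wnd_sum += frag_sz[i];

        for (i = 0; ; i++) {
                /* every window of wnd_size BDs must cover at least one MSS */
                if (wnd_sum < lso_mss)
                        return 1;
                if (i + wnd_size >= nfrags)
                        break;
                wnd_sum += frag_sz[i + wnd_size];
                wnd_sum -= frag_sz[i];
        }
        return 0;
}
#endif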
10706
10707 /* called with netif_tx_lock
10708  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10709  * netif_wake_queue()
10710  */
10711 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10712 {
10713         struct bnx2x *bp = netdev_priv(dev);
10714         struct bnx2x_fastpath *fp, *fp_stat;
10715         struct netdev_queue *txq;
10716         struct sw_tx_bd *tx_buf;
10717         struct eth_tx_start_bd *tx_start_bd;
10718         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10719         struct eth_tx_parse_bd *pbd = NULL;
10720         u16 pkt_prod, bd_prod;
10721         int nbd, fp_index;
10722         dma_addr_t mapping;
10723         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10724         int i;
10725         u8 hlen = 0;
10726         __le16 pkt_size = 0;
10727
10728 #ifdef BNX2X_STOP_ON_ERROR
10729         if (unlikely(bp->panic))
10730                 return NETDEV_TX_BUSY;
10731 #endif
10732
10733         fp_index = skb_get_queue_mapping(skb);
10734         txq = netdev_get_tx_queue(dev, fp_index);
10735
10736         fp = &bp->fp[fp_index + bp->num_rx_queues];
10737         fp_stat = &bp->fp[fp_index];
10738
10739         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10740                 fp_stat->eth_q_stats.driver_xoff++;
10741                 netif_tx_stop_queue(txq);
10742                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10743                 return NETDEV_TX_BUSY;
10744         }
10745
10746         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10747            "  gso type %x  xmit_type %x\n",
10748            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10749            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10750
10751 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10752         /* First, check if we need to linearize the skb (due to FW
10753            restrictions). No need to check fragmentation if page size > 8K
10754            (there will be no violation of FW restrictions) */
10755         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10756                 /* Statistics of linearization */
10757                 bp->lin_cnt++;
10758                 if (skb_linearize(skb) != 0) {
10759                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10760                            "silently dropping this SKB\n");
10761                         dev_kfree_skb_any(skb);
10762                         return NETDEV_TX_OK;
10763                 }
10764         }
10765 #endif
10766
10767         /*
10768          * Please read carefully. First we use one BD which we mark as start,
10769          * then we have a parsing info BD (used for TSO or xsum),
10770          * and only then we have the rest of the TSO BDs.
10771          * (don't forget to mark the last one as last,
10772          * and to unmap only AFTER you write to the BD ...)
10773          * And above all, all PBD sizes are in words - NOT DWORDS!
10774          */
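        /*
         * Illustration (editorial): for a typical TSO packet with two page
         * fragments the chain built below is
         *
         *   [start BD: linear data, FLAGS_START_BD]
         *   [parse BD: hlen/mss/pseudo csum       ]
         *   [data  BD: frag 0                     ]
         *   [data  BD: frag 1                     ]
         *
         * nbd counts all of these, plus a possible split-header BD and a
         * next-page BD (see below).
         */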
10775
10776         pkt_prod = fp->tx_pkt_prod++;
10777         bd_prod = TX_BD(fp->tx_bd_prod);
10778
10779         /* get a tx_buf and first BD */
10780         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10781         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10782
10783         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10784         tx_start_bd->general_data = (UNICAST_ADDRESS <<
10785                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10786         /* header nbd */
10787         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10788
10789         /* remember the first BD of the packet */
10790         tx_buf->first_bd = fp->tx_bd_prod;
10791         tx_buf->skb = skb;
10792         tx_buf->flags = 0;
10793
10794         DP(NETIF_MSG_TX_QUEUED,
10795            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10796            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10797
10798 #ifdef BCM_VLAN
10799         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10800             (bp->flags & HW_VLAN_TX_FLAG)) {
10801                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10802                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10803         } else
10804 #endif
10805                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10806
10807         /* turn on parsing and get a BD */
10808         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10809         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10810
10811         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10812
10813         if (xmit_type & XMIT_CSUM) {
10814                 hlen = (skb_network_header(skb) - skb->data) / 2; /* in words */
10815
10816                 /* for now NS flag is not used in Linux */
10817                 pbd->global_data =
10818                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10819                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10820
10821                 pbd->ip_hlen = (skb_transport_header(skb) -
10822                                 skb_network_header(skb)) / 2;
10823
10824                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10825
10826                 pbd->total_hlen = cpu_to_le16(hlen);
10827                 hlen = hlen*2; /* back to bytes; the PBD keeps sizes in words */
10828
10829                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
10830
10831                 if (xmit_type & XMIT_CSUM_V4)
10832                         tx_start_bd->bd_flags.as_bitfield |=
10833                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10834                 else
10835                         tx_start_bd->bd_flags.as_bitfield |=
10836                                                 ETH_TX_BD_FLAGS_IPV6;
10837
10838                 if (xmit_type & XMIT_CSUM_TCP) {
10839                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10840
10841                 } else {
10842                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10843
10844                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
10845
10846                         DP(NETIF_MSG_TX_QUEUED,
10847                            "hlen %d  fix %d  csum before fix %x\n",
10848                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
10849
10850                         /* HW bug: fixup the CSUM */
10851                         pbd->tcp_pseudo_csum =
10852                                 bnx2x_csum_fix(skb_transport_header(skb),
10853                                                SKB_CS(skb), fix);
10854
10855                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10856                            pbd->tcp_pseudo_csum);
10857                 }
10858         }
10859
10860         mapping = pci_map_single(bp->pdev, skb->data,
10861                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10862
10863         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10864         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10865         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
10866         tx_start_bd->nbd = cpu_to_le16(nbd);
10867         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10868         pkt_size = tx_start_bd->nbytes;
10869
10870         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10871            "  nbytes %d  flags %x  vlan %x\n",
10872            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
10873            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
10874            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
10875
10876         if (xmit_type & XMIT_GSO) {
10877
10878                 DP(NETIF_MSG_TX_QUEUED,
10879                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10880                    skb->len, hlen, skb_headlen(skb),
10881                    skb_shinfo(skb)->gso_size);
10882
10883                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10884
10885                 if (unlikely(skb_headlen(skb) > hlen))
10886                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
10887                                                  hlen, bd_prod, ++nbd);
10888
10889                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10890                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10891                 pbd->tcp_flags = pbd_tcp_flags(skb);
10892
10893                 if (xmit_type & XMIT_GSO_V4) {
10894                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10895                         pbd->tcp_pseudo_csum =
10896                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10897                                                           ip_hdr(skb)->daddr,
10898                                                           0, IPPROTO_TCP, 0));
10899
10900                 } else
10901                         pbd->tcp_pseudo_csum =
10902                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10903                                                         &ipv6_hdr(skb)->daddr,
10904                                                         0, IPPROTO_TCP, 0));
10905
10906                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10907         }
10908         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
10909
10910         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10911                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10912
10913                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10914                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10915                 if (total_pkt_bd == NULL)
10916                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10917
10918                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10919                                        frag->size, PCI_DMA_TODEVICE);
10920
10921                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10922                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10923                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
10924                 le16_add_cpu(&pkt_size, frag->size);
10925
10926                 DP(NETIF_MSG_TX_QUEUED,
10927                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
10928                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
10929                    le16_to_cpu(tx_data_bd->nbytes));
10930         }
10931
10932         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
10933
10934         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10935
10936         /* now send a tx doorbell, counting the next BD
10937          * if the packet contains or ends with it
10938          */
10939         if (TX_BD_POFF(bd_prod) < nbd)
10940                 nbd++;
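        /*
         * Editorial note: each BD ring page is assumed to end with a
         * next-page pointer BD; the TX_BD_POFF() test above counts that
         * BD in nbd when the packet's BDs wrap past a page boundary.
         */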
10941
10942         if (total_pkt_bd != NULL)
10943                 total_pkt_bd->total_pkt_bytes = pkt_size;
10944
10945         if (pbd)
10946                 DP(NETIF_MSG_TX_QUEUED,
10947                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10948                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10949                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10950                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10951                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10952
10953         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10954
10955         /*
10956          * Make sure that the BD data is updated before updating the producer
10957          * since FW might read the BD right after the producer is updated.
10958          * This is only applicable for weak-ordered memory model archs such
10959          * as IA-64. The following barrier is also mandatory since the FW
10960          * assumes packets always have BDs.
10961          */
10962         wmb();
10963
10964         fp->tx_db.data.prod += nbd;
10965         barrier();
10966         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
10967
10968         mmiowb();
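        /*
         * Ordering recap (editorial): wmb() publishes the BD contents
         * before the producer is advanced, barrier() stops the compiler
         * from sinking the tx_db update below the DOORBELL write, and
         * mmiowb() orders the doorbell MMIO write with respect to writes
         * issued by other CPUs before the Tx lock is released.
         */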
10969
10970         fp->tx_bd_prod += nbd;
10971
10972         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10973                 netif_tx_stop_queue(txq);
10974                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10975                    if we put Tx into XOFF state. */
10976                 smp_mb();
10977                 fp_stat->eth_q_stats.driver_xoff++;
10978                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10979                         netif_tx_wake_queue(txq);
10980         }
10981         fp_stat->tx_pkt++;
10982
10983         return NETDEV_TX_OK;
10984 }
10985
10986 /* called with rtnl_lock */
10987 static int bnx2x_open(struct net_device *dev)
10988 {
10989         struct bnx2x *bp = netdev_priv(dev);
10990
10991         netif_carrier_off(dev);
10992
10993         bnx2x_set_power_state(bp, PCI_D0);
10994
10995         return bnx2x_nic_load(bp, LOAD_OPEN);
10996 }
10997
10998 /* called with rtnl_lock */
10999 static int bnx2x_close(struct net_device *dev)
11000 {
11001         struct bnx2x *bp = netdev_priv(dev);
11002
11003         /* Unload the driver, release IRQs */
11004         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11005         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11006                 if (!CHIP_REV_IS_SLOW(bp))
11007                         bnx2x_set_power_state(bp, PCI_D3hot);
11008
11009         return 0;
11010 }
11011
11012 /* called with netif_tx_lock from dev_mcast.c */
11013 static void bnx2x_set_rx_mode(struct net_device *dev)
11014 {
11015         struct bnx2x *bp = netdev_priv(dev);
11016         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11017         int port = BP_PORT(bp);
11018
11019         if (bp->state != BNX2X_STATE_OPEN) {
11020                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11021                 return;
11022         }
11023
11024         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11025
11026         if (dev->flags & IFF_PROMISC)
11027                 rx_mode = BNX2X_RX_MODE_PROMISC;
11028
11029         else if ((dev->flags & IFF_ALLMULTI) ||
11030                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11031                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11032
11033         else { /* some multicasts */
11034                 if (CHIP_IS_E1(bp)) {
11035                         int i, old, offset;
11036                         struct dev_mc_list *mclist;
11037                         struct mac_configuration_cmd *config =
11038                                                 bnx2x_sp(bp, mcast_config);
11039
11040                         for (i = 0, mclist = dev->mc_list;
11041                              mclist && (i < dev->mc_count);
11042                              i++, mclist = mclist->next) {
11043
11044                                 config->config_table[i].
11045                                         cam_entry.msb_mac_addr =
11046                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11047                                 config->config_table[i].
11048                                         cam_entry.middle_mac_addr =
11049                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11050                                 config->config_table[i].
11051                                         cam_entry.lsb_mac_addr =
11052                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11053                                 config->config_table[i].cam_entry.flags =
11054                                                         cpu_to_le16(port);
11055                                 config->config_table[i].
11056                                         target_table_entry.flags = 0;
11057                                 config->config_table[i].target_table_entry.
11058                                         clients_bit_vector =
11059                                                 cpu_to_le32(1 << BP_L_ID(bp));
11060                                 config->config_table[i].
11061                                         target_table_entry.vlan_id = 0;
11062
11063                                 DP(NETIF_MSG_IFUP,
11064                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11065                                    config->config_table[i].
11066                                                 cam_entry.msb_mac_addr,
11067                                    config->config_table[i].
11068                                                 cam_entry.middle_mac_addr,
11069                                    config->config_table[i].
11070                                                 cam_entry.lsb_mac_addr);
11071                         }
11072                         old = config->hdr.length;
11073                         if (old > i) {
11074                                 for (; i < old; i++) {
11075                                         if (CAM_IS_INVALID(config->
11076                                                            config_table[i])) {
11077                                                 /* already invalidated */
11078                                                 break;
11079                                         }
11080                                         /* invalidate */
11081                                         CAM_INVALIDATE(config->
11082                                                        config_table[i]);
11083                                 }
11084                         }
11085
11086                         if (CHIP_REV_IS_SLOW(bp))
11087                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11088                         else
11089                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11090
11091                         config->hdr.length = i;
11092                         config->hdr.offset = offset;
11093                         config->hdr.client_id = bp->fp->cl_id;
11094                         config->hdr.reserved1 = 0;
11095
11096                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11097                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11098                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11099                                       0);
11100                 } else { /* E1H */
11101                         /* Accept one or more multicasts */
11102                         struct dev_mc_list *mclist;
11103                         u32 mc_filter[MC_HASH_SIZE];
11104                         u32 crc, bit, regidx;
11105                         int i;
11106
11107                         memset(mc_filter, 0, sizeof(mc_filter));
11108
11109                         for (i = 0, mclist = dev->mc_list;
11110                              mclist && (i < dev->mc_count);
11111                              i++, mclist = mclist->next) {
11112
11113                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11114                                    mclist->dmi_addr);
11115
11116                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11117                                 bit = (crc >> 24) & 0xff;
11118                                 regidx = bit >> 5;
11119                                 bit &= 0x1f;
11120                                 mc_filter[regidx] |= (1 << bit);
11121                         }
11122
11123                         for (i = 0; i < MC_HASH_SIZE; i++)
11124                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11125                                        mc_filter[i]);
11126                 }
11127         }
11128
11129         bp->rx_mode = rx_mode;
11130         bnx2x_set_storm_rx_mode(bp);
11131 }
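/*
 * Editorial sketch (not part of the driver): the E1H approximate multicast
 * filter above reduces each MAC address to one bit in a 256-bit table split
 * across MC_HASH_SIZE 32-bit registers.  demo_mc_hash_bit() is hypothetical;
 * it shows only the bit selection, assuming crc32c_le() and ETH_ALEN as used
 * in the loop above.
 */
#if 0 /* illustration only, never compiled */
static void demo_mc_hash_bit(const u8 *mac, u32 *mc_filter)
{
        u32 crc = crc32c_le(0, mac, ETH_ALEN);
        u32 bit = (crc >> 24) & 0xff;   /* top CRC byte picks bit 0..255 */
        u32 regidx = bit >> 5;          /* 32 bits per filter register */

        mc_filter[regidx] |= 1 << (bit & 0x1f);
}
#endif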
11132
11133 /* called with rtnl_lock */
11134 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11135 {
11136         struct sockaddr *addr = p;
11137         struct bnx2x *bp = netdev_priv(dev);
11138
11139         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11140                 return -EINVAL;
11141
11142         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11143         if (netif_running(dev)) {
11144                 if (CHIP_IS_E1(bp))
11145                         bnx2x_set_mac_addr_e1(bp, 1);
11146                 else
11147                         bnx2x_set_mac_addr_e1h(bp, 1);
11148         }
11149
11150         return 0;
11151 }
11152
11153 /* called with rtnl_lock */
11154 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11155 {
11156         struct mii_ioctl_data *data = if_mii(ifr);
11157         struct bnx2x *bp = netdev_priv(dev);
11158         int port = BP_PORT(bp);
11159         int err;
11160
11161         switch (cmd) {
11162         case SIOCGMIIPHY:
11163                 data->phy_id = bp->port.phy_addr;
11164
11165                 /* fallthrough */
11166
11167         case SIOCGMIIREG: {
11168                 u16 mii_regval;
11169
11170                 if (!netif_running(dev))
11171                         return -EAGAIN;
11172
11173                 mutex_lock(&bp->port.phy_mutex);
11174                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
11175                                       DEFAULT_PHY_DEV_ADDR,
11176                                       (data->reg_num & 0x1f), &mii_regval);
11177                 data->val_out = mii_regval;
11178                 mutex_unlock(&bp->port.phy_mutex);
11179                 return err;
11180         }
11181
11182         case SIOCSMIIREG:
11183                 if (!capable(CAP_NET_ADMIN))
11184                         return -EPERM;
11185
11186                 if (!netif_running(dev))
11187                         return -EAGAIN;
11188
11189                 mutex_lock(&bp->port.phy_mutex);
11190                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
11191                                        DEFAULT_PHY_DEV_ADDR,
11192                                        (data->reg_num & 0x1f), data->val_in);
11193                 mutex_unlock(&bp->port.phy_mutex);
11194                 return err;
11195
11196         default:
11197                 /* do nothing */
11198                 break;
11199         }
11200
11201         return -EOPNOTSUPP;
11202 }
11203
11204 /* called with rtnl_lock */
11205 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11206 {
11207         struct bnx2x *bp = netdev_priv(dev);
11208         int rc = 0;
11209
11210         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11211             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11212                 return -EINVAL;
11213
11214         /* This does not race with packet allocation
11215          * because the actual alloc size is
11216          * only updated as part of load
11217          */
11218         dev->mtu = new_mtu;
11219
11220         if (netif_running(dev)) {
11221                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11222                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11223         }
11224
11225         return rc;
11226 }
11227
11228 static void bnx2x_tx_timeout(struct net_device *dev)
11229 {
11230         struct bnx2x *bp = netdev_priv(dev);
11231
11232 #ifdef BNX2X_STOP_ON_ERROR
11233         if (!bp->panic)
11234                 bnx2x_panic();
11235 #endif
11236         /* This allows the netif to be shut down gracefully before resetting */
11237         schedule_work(&bp->reset_task);
11238 }
11239
11240 #ifdef BCM_VLAN
11241 /* called with rtnl_lock */
11242 static void bnx2x_vlan_rx_register(struct net_device *dev,
11243                                    struct vlan_group *vlgrp)
11244 {
11245         struct bnx2x *bp = netdev_priv(dev);
11246
11247         bp->vlgrp = vlgrp;
11248
11249         /* Set flags according to the required capabilities */
11250         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11251
11252         if (dev->features & NETIF_F_HW_VLAN_TX)
11253                 bp->flags |= HW_VLAN_TX_FLAG;
11254
11255         if (dev->features & NETIF_F_HW_VLAN_RX)
11256                 bp->flags |= HW_VLAN_RX_FLAG;
11257
11258         if (netif_running(dev))
11259                 bnx2x_set_client_config(bp);
11260 }
11261
11262 #endif
11263
11264 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11265 static void poll_bnx2x(struct net_device *dev)
11266 {
11267         struct bnx2x *bp = netdev_priv(dev);
11268
11269         disable_irq(bp->pdev->irq);
11270         bnx2x_interrupt(bp->pdev->irq, dev);
11271         enable_irq(bp->pdev->irq);
11272 }
11273 #endif
11274
11275 static const struct net_device_ops bnx2x_netdev_ops = {
11276         .ndo_open               = bnx2x_open,
11277         .ndo_stop               = bnx2x_close,
11278         .ndo_start_xmit         = bnx2x_start_xmit,
11279         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11280         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11281         .ndo_validate_addr      = eth_validate_addr,
11282         .ndo_do_ioctl           = bnx2x_ioctl,
11283         .ndo_change_mtu         = bnx2x_change_mtu,
11284         .ndo_tx_timeout         = bnx2x_tx_timeout,
11285 #ifdef BCM_VLAN
11286         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11287 #endif
11288 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11289         .ndo_poll_controller    = poll_bnx2x,
11290 #endif
11291 };
11292
11293 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11294                                     struct net_device *dev)
11295 {
11296         struct bnx2x *bp;
11297         int rc;
11298
11299         SET_NETDEV_DEV(dev, &pdev->dev);
11300         bp = netdev_priv(dev);
11301
11302         bp->dev = dev;
11303         bp->pdev = pdev;
11304         bp->flags = 0;
11305         bp->func = PCI_FUNC(pdev->devfn);
11306
11307         rc = pci_enable_device(pdev);
11308         if (rc) {
11309                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11310                 goto err_out;
11311         }
11312
11313         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11314                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11315                        " aborting\n");
11316                 rc = -ENODEV;
11317                 goto err_out_disable;
11318         }
11319
11320         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11321                 printk(KERN_ERR PFX "Cannot find second PCI device"
11322                        " base address, aborting\n");
11323                 rc = -ENODEV;
11324                 goto err_out_disable;
11325         }
11326
11327         if (atomic_read(&pdev->enable_cnt) == 1) {
11328                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11329                 if (rc) {
11330                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11331                                " aborting\n");
11332                         goto err_out_disable;
11333                 }
11334
11335                 pci_set_master(pdev);
11336                 pci_save_state(pdev);
11337         }
11338
11339         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11340         if (bp->pm_cap == 0) {
11341                 printk(KERN_ERR PFX "Cannot find power management"
11342                        " capability, aborting\n");
11343                 rc = -EIO;
11344                 goto err_out_release;
11345         }
11346
11347         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11348         if (bp->pcie_cap == 0) {
11349                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11350                        " aborting\n");
11351                 rc = -EIO;
11352                 goto err_out_release;
11353         }
11354
11355         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11356                 bp->flags |= USING_DAC_FLAG;
11357                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11358                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11359                                " failed, aborting\n");
11360                         rc = -EIO;
11361                         goto err_out_release;
11362                 }
11363
11364         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11365                 printk(KERN_ERR PFX "System does not support DMA,"
11366                        " aborting\n");
11367                 rc = -EIO;
11368                 goto err_out_release;
11369         }
11370
11371         dev->mem_start = pci_resource_start(pdev, 0);
11372         dev->base_addr = dev->mem_start;
11373         dev->mem_end = pci_resource_end(pdev, 0);
11374
11375         dev->irq = pdev->irq;
11376
11377         bp->regview = pci_ioremap_bar(pdev, 0);
11378         if (!bp->regview) {
11379                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11380                 rc = -ENOMEM;
11381                 goto err_out_release;
11382         }
11383
11384         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11385                                         min_t(u64, BNX2X_DB_SIZE,
11386                                               pci_resource_len(pdev, 2)));
11387         if (!bp->doorbells) {
11388                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11389                 rc = -ENOMEM;
11390                 goto err_out_unmap;
11391         }
11392
11393         bnx2x_set_power_state(bp, PCI_D0);
11394
11395         /* clean indirect addresses */
11396         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11397                                PCICFG_VENDOR_ID_OFFSET);
11398         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11399         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11400         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11401         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11402
11403         dev->watchdog_timeo = TX_TIMEOUT;
11404
11405         dev->netdev_ops = &bnx2x_netdev_ops;
11406         dev->ethtool_ops = &bnx2x_ethtool_ops;
11407         dev->features |= NETIF_F_SG;
11408         dev->features |= NETIF_F_HW_CSUM;
11409         if (bp->flags & USING_DAC_FLAG)
11410                 dev->features |= NETIF_F_HIGHDMA;
11411         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11412         dev->features |= NETIF_F_TSO6;
11413 #ifdef BCM_VLAN
11414         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11415         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11416
11417         dev->vlan_features |= NETIF_F_SG;
11418         dev->vlan_features |= NETIF_F_HW_CSUM;
11419         if (bp->flags & USING_DAC_FLAG)
11420                 dev->vlan_features |= NETIF_F_HIGHDMA;
11421         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11422         dev->vlan_features |= NETIF_F_TSO6;
11423 #endif
11424
11425         return 0;
11426
11427 err_out_unmap:
11428         if (bp->regview) {
11429                 iounmap(bp->regview);
11430                 bp->regview = NULL;
11431         }
11432         if (bp->doorbells) {
11433                 iounmap(bp->doorbells);
11434                 bp->doorbells = NULL;
11435         }
11436
11437 err_out_release:
11438         if (atomic_read(&pdev->enable_cnt) == 1)
11439                 pci_release_regions(pdev);
11440
11441 err_out_disable:
11442         pci_disable_device(pdev);
11443         pci_set_drvdata(pdev, NULL);
11444
11445 err_out:
11446         return rc;
11447 }
11448
11449 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11450 {
11451         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11452
11453         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11454         return val;
11455 }
11456
11457 /* return value: 1=2.5GHz, 2=5GHz */
11458 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11459 {
11460         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11461
11462         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11463         return val;
11464 }
11465 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11466 {
11467         struct bnx2x_fw_file_hdr *fw_hdr;
11468         struct bnx2x_fw_file_section *sections;
11469         u16 *ops_offsets;
11470         u32 offset, len, num_ops;
11471         int i;
11472         const struct firmware *firmware = bp->firmware;
11473         const u8 *fw_ver;
11474
11475         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11476                 return -EINVAL;
11477
11478         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11479         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11480
11481         /* Make sure none of the offsets and sizes make us read beyond
11482          * the end of the firmware data */
11483         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11484                 offset = be32_to_cpu(sections[i].offset);
11485                 len = be32_to_cpu(sections[i].len);
11486                 if (offset + len > firmware->size) {
11487                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11488                         return -EINVAL;
11489                 }
11490         }
11491
11492         /* Likewise for the init_ops offsets */
11493         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11494         ops_offsets = (u16 *)(firmware->data + offset);
11495         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11496
11497         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11498                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11499                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11500                         return -EINVAL;
11501                 }
11502         }
11503
11504         /* Check FW version */
11505         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11506         fw_ver = firmware->data + offset;
11507         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11508             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11509             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11510             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11511                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11512                                     " Should be %d.%d.%d.%d\n",
11513                        fw_ver[0], fw_ver[1], fw_ver[2],
11514                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11515                        BCM_5710_FW_MINOR_VERSION,
11516                        BCM_5710_FW_REVISION_VERSION,
11517                        BCM_5710_FW_ENGINEERING_VERSION);
11518                 return -EINVAL;
11519         }
11520
11521         return 0;
11522 }
11523
11524 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11525 {
11526         u32 i;
11527         const __be32 *source = (const __be32 *)_source;
11528         u32 *target = (u32 *)_target;
11529
11530         for (i = 0; i < n/4; i++)
11531                 target[i] = be32_to_cpu(source[i]);
11532 }
11533
11534 /*
11535  * Ops array is stored in the following format:
11536  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11537  */
11538 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11539 {
11540         u32 i, j, tmp;
11541         const __be32 *source = (const __be32 *)_source;
11542         struct raw_op *target = (struct raw_op *)_target;
11543
11544         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11545                 tmp = be32_to_cpu(source[j]);
11546                 target[i].op = (tmp >> 24) & 0xff;
11547                 target[i].offset = tmp & 0xffffff;
11548                 target[i].raw_data = be32_to_cpu(source[j+1]);
11549         }
11550 }
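/*
 * Worked example (editorial): the big-endian record 02 00 10 00 de ad be ef
 * decodes above to op = 0x02, offset = 0x001000, raw_data = 0xdeadbeef.
 */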
11551 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11552 {
11553         u32 i;
11554         u16 *target = (u16 *)_target;
11555         const __be16 *source = (const __be16 *)_source;
11556
11557         for (i = 0; i < n/2; i++)
11558                 target[i] = be16_to_cpu(source[i]);
11559 }
11560
11561 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11562         do {   \
11563                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11564                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11565                 if (!bp->arr) { \
11566                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11567                         goto lbl; \
11568                 } \
11569                 func(bp->firmware->data + \
11570                         be32_to_cpu(fw_hdr->arr.offset), \
11571                         (u8 *)bp->arr, len); \
11572         } while (0)
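/*
 * Example use (editorial): BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) allocates bp->init_data to the
 * length recorded in fw_hdr->init_data, fills it with the byte-swapped
 * blob found at the recorded offset, and jumps to request_firmware_exit
 * if the allocation fails.
 */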
11573
11574
11575 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11576 {
11577         char fw_file_name[40] = {0};
11578         int rc, offset;
11579         struct bnx2x_fw_file_hdr *fw_hdr;
11580
11581         /* Create a FW file name */
11582         if (CHIP_IS_E1(bp))
11583                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11584         else
11585                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11586
11587         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11588                 BCM_5710_FW_MAJOR_VERSION,
11589                 BCM_5710_FW_MINOR_VERSION,
11590                 BCM_5710_FW_REVISION_VERSION,
11591                 BCM_5710_FW_ENGINEERING_VERSION);
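        /*
         * Editorial note: the resulting name is the chip-specific prefix
         * followed by the four firmware version components, i.e. something
         * of the form "<prefix><maj>.<min>.<rev>.<eng>.fw".
         */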
11592
11593         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11594
11595         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11596         if (rc) {
11597                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11598                 goto request_firmware_exit;
11599         }
11600
11601         rc = bnx2x_check_firmware(bp);
11602         if (rc) {
11603                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11604                 goto request_firmware_exit;
11605         }
11606
11607         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11608
11609         /* Initialize the pointers to the init arrays */
11610         /* Blob */
11611         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11612
11613         /* Opcodes */
11614         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11615
11616         /* Offsets */
11617         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11618
11619         /* STORMs firmware */
11620         bp->tsem_int_table_data = bp->firmware->data +
11621                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11622         bp->tsem_pram_data      = bp->firmware->data +
11623                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11624         bp->usem_int_table_data = bp->firmware->data +
11625                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11626         bp->usem_pram_data      = bp->firmware->data +
11627                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11628         bp->xsem_int_table_data = bp->firmware->data +
11629                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11630         bp->xsem_pram_data      = bp->firmware->data +
11631                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11632         bp->csem_int_table_data = bp->firmware->data +
11633                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11634         bp->csem_pram_data      = bp->firmware->data +
11635                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11636
11637         return 0;
11638 init_offsets_alloc_err:
11639         kfree(bp->init_ops);
11640 init_ops_alloc_err:
11641         kfree(bp->init_data);
11642 request_firmware_exit:
11643         release_firmware(bp->firmware);
11644
11645         return rc;
11646 }
11647
11648
11649
11650 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11651                                     const struct pci_device_id *ent)
11652 {
11653         static int version_printed;
11654         struct net_device *dev = NULL;
11655         struct bnx2x *bp;
11656         int rc;
11657
11658         if (version_printed++ == 0)
11659                 printk(KERN_INFO "%s", version);
11660
11661         /* dev zeroed in alloc_etherdev_mq */
11662         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11663         if (!dev) {
11664                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11665                 return -ENOMEM;
11666         }
11667
11668         bp = netdev_priv(dev);
11669         bp->msglevel = debug;
11670
11671         rc = bnx2x_init_dev(pdev, dev);
11672         if (rc < 0) {
11673                 free_netdev(dev);
11674                 return rc;
11675         }
11676
11677         pci_set_drvdata(pdev, dev);
11678
11679         rc = bnx2x_init_bp(bp);
11680         if (rc)
11681                 goto init_one_exit;
11682
11683         /* Set init arrays */
11684         rc = bnx2x_init_firmware(bp, &pdev->dev);
11685         if (rc) {
11686                 printk(KERN_ERR PFX "Error loading firmware\n");
11687                 goto init_one_exit;
11688         }
11689
11690         rc = register_netdev(dev);
11691         if (rc) {
11692                 dev_err(&pdev->dev, "Cannot register net device\n");
11693                 goto init_one_exit;
11694         }
11695
11696         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11697                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11698                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11699                bnx2x_get_pcie_width(bp),
11700                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11701                dev->base_addr, bp->pdev->irq);
11702         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11703
11704         return 0;
11705
11706 init_one_exit:
11707         if (bp->regview)
11708                 iounmap(bp->regview);
11709
11710         if (bp->doorbells)
11711                 iounmap(bp->doorbells);
11712
11713         free_netdev(dev);
11714
11715         if (atomic_read(&pdev->enable_cnt) == 1)
11716                 pci_release_regions(pdev);
11717
11718         pci_disable_device(pdev);
11719         pci_set_drvdata(pdev, NULL);
11720
11721         return rc;
11722 }
11723
11724 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11725 {
11726         struct net_device *dev = pci_get_drvdata(pdev);
11727         struct bnx2x *bp;
11728
11729         if (!dev) {
11730                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11731                 return;
11732         }
11733         bp = netdev_priv(dev);
11734
11735         unregister_netdev(dev);
11736
11737         kfree(bp->init_ops_offsets);
11738         kfree(bp->init_ops);
11739         kfree(bp->init_data);
11740         release_firmware(bp->firmware);
11741
11742         if (bp->regview)
11743                 iounmap(bp->regview);
11744
11745         if (bp->doorbells)
11746                 iounmap(bp->doorbells);
11747
11748         free_netdev(dev);
11749
11750         if (atomic_read(&pdev->enable_cnt) == 1)
11751                 pci_release_regions(pdev);
11752
11753         pci_disable_device(pdev);
11754         pci_set_drvdata(pdev, NULL);
11755 }
11756
11757 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11758 {
11759         struct net_device *dev = pci_get_drvdata(pdev);
11760         struct bnx2x *bp;
11761
11762         if (!dev) {
11763                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11764                 return -ENODEV;
11765         }
11766         bp = netdev_priv(dev);
11767
11768         rtnl_lock();
11769
11770         pci_save_state(pdev);
11771
11772         if (!netif_running(dev)) {
11773                 rtnl_unlock();
11774                 return 0;
11775         }
11776
11777         netif_device_detach(dev);
11778
11779         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11780
11781         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11782
11783         rtnl_unlock();
11784
11785         return 0;
11786 }
11787
11788 static int bnx2x_resume(struct pci_dev *pdev)
11789 {
11790         struct net_device *dev = pci_get_drvdata(pdev);
11791         struct bnx2x *bp;
11792         int rc;
11793
11794         if (!dev) {
11795                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11796                 return -ENODEV;
11797         }
11798         bp = netdev_priv(dev);
11799
11800         rtnl_lock();
11801
11802         pci_restore_state(pdev);
11803
11804         if (!netif_running(dev)) {
11805                 rtnl_unlock();
11806                 return 0;
11807         }
11808
11809         bnx2x_set_power_state(bp, PCI_D0);
11810         netif_device_attach(dev);
11811
11812         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11813
11814         rtnl_unlock();
11815
11816         return rc;
11817 }
11818
11819 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11820 {
11821         int i;
11822
11823         bp->state = BNX2X_STATE_ERROR;
11824
11825         bp->rx_mode = BNX2X_RX_MODE_NONE;
11826
11827         bnx2x_netif_stop(bp, 0);
11828
11829         del_timer_sync(&bp->timer);
11830         bp->stats_state = STATS_STATE_DISABLED;
11831         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11832
11833         /* Release IRQs */
11834         bnx2x_free_irq(bp);
11835
11836         if (CHIP_IS_E1(bp)) {
11837                 struct mac_configuration_cmd *config =
11838                                                 bnx2x_sp(bp, mcast_config);
11839
11840                 for (i = 0; i < config->hdr.length; i++)
11841                         CAM_INVALIDATE(config->config_table[i]);
11842         }
11843
11844         /* Free SKBs, SGEs, TPA pool and driver internals */
11845         bnx2x_free_skbs(bp);
11846         for_each_rx_queue(bp, i)
11847                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11848         for_each_rx_queue(bp, i)
11849                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11850         bnx2x_free_mem(bp);
11851
11852         bp->state = BNX2X_STATE_CLOSED;
11853
11854         netif_carrier_off(bp->dev);
11855
11856         return 0;
11857 }
11858
11859 static void bnx2x_eeh_recover(struct bnx2x *bp)
11860 {
11861         u32 val;
11862
11863         mutex_init(&bp->port.phy_mutex);
11864
11865         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11866         bp->link_params.shmem_base = bp->common.shmem_base;
11867         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11868
11869         if (!bp->common.shmem_base ||
11870             (bp->common.shmem_base < 0xA0000) ||
11871             (bp->common.shmem_base >= 0xC0000)) {
11872                 BNX2X_DEV_INFO("MCP not active\n");
11873                 bp->flags |= NO_MCP_FLAG;
11874                 return;
11875         }
11876
11877         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11878         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11879                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11880                 BNX2X_ERR("BAD MCP validity signature\n");
11881
11882         if (!BP_NOMCP(bp)) {
11883                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11884                               & DRV_MSG_SEQ_NUMBER_MASK);
11885                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11886         }
11887 }
11888
11889 /**
11890  * bnx2x_io_error_detected - called when PCI error is detected
11891  * @pdev: Pointer to PCI device
11892  * @state: The current pci connection state
11893  *
11894  * This function is called after a PCI bus error affecting
11895  * this device has been detected.
11896  */
11897 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11898                                                 pci_channel_state_t state)
11899 {
11900         struct net_device *dev = pci_get_drvdata(pdev);
11901         struct bnx2x *bp = netdev_priv(dev);
11902
11903         rtnl_lock();
11904
11905         netif_device_detach(dev);
11906
11907         if (state == pci_channel_io_perm_failure) {
11908                 rtnl_unlock();
11909                 return PCI_ERS_RESULT_DISCONNECT;
11910         }
11911
11912         if (netif_running(dev))
11913                 bnx2x_eeh_nic_unload(bp);
11914
11915         pci_disable_device(pdev);
11916
11917         rtnl_unlock();
11918
11919         /* Request a slot reset */
11920         return PCI_ERS_RESULT_NEED_RESET;
11921 }
11922
11923 /**
11924  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11925  * @pdev: Pointer to PCI device
11926  *
11927  * Restart the card from scratch, as if from a cold boot.
11928  */
11929 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11930 {
11931         struct net_device *dev = pci_get_drvdata(pdev);
11932         struct bnx2x *bp = netdev_priv(dev);
11933
11934         rtnl_lock();
11935
11936         if (pci_enable_device(pdev)) {
11937                 dev_err(&pdev->dev,
11938                         "Cannot re-enable PCI device after reset\n");
11939                 rtnl_unlock();
11940                 return PCI_ERS_RESULT_DISCONNECT;
11941         }
11942
11943         pci_set_master(pdev);
11944         pci_restore_state(pdev);
11945
11946         if (netif_running(dev))
11947                 bnx2x_set_power_state(bp, PCI_D0);
11948
11949         rtnl_unlock();
11950
11951         return PCI_ERS_RESULT_RECOVERED;
11952 }
11953
11954 /**
11955  * bnx2x_io_resume - called when traffic can start flowing again
11956  * @pdev: Pointer to PCI device
11957  *
11958  * This callback is called when the error recovery driver tells us that
11959  * it's OK to resume normal operation.
11960  */
11961 static void bnx2x_io_resume(struct pci_dev *pdev)
11962 {
11963         struct net_device *dev = pci_get_drvdata(pdev);
11964         struct bnx2x *bp = netdev_priv(dev);
11965
11966         rtnl_lock();
11967
11968         bnx2x_eeh_recover(bp);
11969
11970         if (netif_running(dev))
11971                 bnx2x_nic_load(bp, LOAD_NORMAL);
11972
11973         netif_device_attach(dev);
11974
11975         rtnl_unlock();
11976 }
11977
11978 static struct pci_error_handlers bnx2x_err_handler = {
11979         .error_detected = bnx2x_io_error_detected,
11980         .slot_reset     = bnx2x_io_slot_reset,
11981         .resume         = bnx2x_io_resume,
11982 };
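/*
 * Editorial note: the PCI error recovery core invokes these callbacks in
 * order - error_detected() when a bus error is reported, slot_reset() once
 * the slot has been reset, and resume() when traffic may flow again.
 */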
11983
11984 static struct pci_driver bnx2x_pci_driver = {
11985         .name        = DRV_MODULE_NAME,
11986         .id_table    = bnx2x_pci_tbl,
11987         .probe       = bnx2x_init_one,
11988         .remove      = __devexit_p(bnx2x_remove_one),
11989         .suspend     = bnx2x_suspend,
11990         .resume      = bnx2x_resume,
11991         .err_handler = &bnx2x_err_handler,
11992 };
11993
11994 static int __init bnx2x_init(void)
11995 {
11996         int ret;
11997
11998         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11999         if (bnx2x_wq == NULL) {
12000                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12001                 return -ENOMEM;
12002         }
12003
12004         ret = pci_register_driver(&bnx2x_pci_driver);
12005         if (ret) {
12006                 printk(KERN_ERR PFX "Cannot register driver\n");
12007                 destroy_workqueue(bnx2x_wq);
12008         }
12009         return ret;
12010 }
12011
12012 static void __exit bnx2x_cleanup(void)
12013 {
12014         pci_unregister_driver(&bnx2x_pci_driver);
12015
12016         destroy_workqueue(bnx2x_wq);
12017 }
12018
12019 module_init(bnx2x_init);
12020 module_exit(bnx2x_cleanup);
12021
12022