/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.6"
#define DRV_MODULE_RELDATE      "2008/06/23"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
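
/* Note: both helpers above go through the PCI config-space GRC window:
 * the target GRC address is written to PCICFG_GRC_ADDRESS, the data is
 * moved through PCICFG_GRC_DATA, and the window is then parked back at
 * PCICFG_VENDOR_ID_OFFSET so stray config cycles are not misdirected.
 * Presumably this is also why they are only safe at init time, while
 * the MCP serializes access to the window.
 */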

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
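
/* The command block is mirrored dword by dword into the chip's DMAE
 * command memory (DMAE_REG_CMD_MEM is laid out as an array of
 * dmae_command slots, indexed by channel); writing 1 to the channel's
 * GO register then kicks the copy engine.
 */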

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
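
/* Rough usage sketch: callers stage the payload in the slowpath
 * write-back buffer and DMA it to a GRC address.  The REG_WR_DMAE()
 * wrapper used by bnx2x_wb_wr() below presumably boils down to
 * something like:
 *
 *      memcpy(bnx2x_sp(bp, wb_data[0]), wb_write, len32 * 4);
 *      bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), reg, len32);
 */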

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
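
/* All four STORM processors (X/T/C/U) keep their assert lists in the
 * same layout, which is why the four loops above are identical up to
 * the BAR/offset names; the return value is the total number of
 * asserts found (0 means every list was clean).
 */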

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n");
        printk(KERN_ERR PFX "end of fw dump\n");
}
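
/* The MCP scratchpad holds the firmware log as text in a circular
 * buffer of 8-dword chunks between offsets 0xf108 and 0xf900; the
 * dword at 0xf104 apparently records the current write position as a
 * 0x08000000-based address, so the dump prints from the mark to the
 * end of the buffer and then wraps from the start back to the mark.
 */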

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = 0;
                end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}
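
/* Ordering matters in the teardown above: bumping intr_sem turns the
 * ISRs into no-ops first, the HC write then stops new interrupts at
 * the source, synchronize_irq() waits out any handler still in flight,
 * and only then is cancel_work_sync() safe, since nothing can requeue
 * sp_task anymore.
 */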

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}
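
/* A single read of COMMAND_REG_SIMD_MASK evidently both fetches the
 * per-storm status bits and acks the interrupt at the IGU; the caller
 * decodes the returned mask to see which status blocks changed.
 */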


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
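
/* BD chain layout assumed above: the first BD carries the linear
 * mapping, optionally followed by a parse BD (checksum/LSO offload)
 * and a TSO split-header BD, neither of which owns a DMA mapping; the
 * remaining nbd entries are page-fragment BDs, unmapped one by one.
 */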

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
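
/* Worked example: with, say, tx_ring_size = 4096 and prod == cons
 * (an empty ring), used = NUM_TX_RINGS and this returns
 * 4096 - NUM_TX_RINGS.  The "next-page" BDs are always counted as
 * in-flight regardless of how many actually lie between cons and prod,
 * which errs on the safe side and keeps them from ever being handed
 * out as data descriptors.
 */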

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
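
/* Example of the mask walk above: sge_mask[] presumably packs 64 SGEs
 * per u64 element (RX_SGE_MASK_ELEM_SZ bits each).  An element is
 * refilled to all-ones and rx_sge_prod advanced by a full element only
 * once every bit in it has been cleared by the marking loop, so the
 * producer only ever moves over completely consumed blocks of SGEs.
 */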

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}
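
/* TPA bin life cycle: tpa_start above swaps the spare skb out of
 * tpa_pool[queue] into the producer slot and parks the (still mapped)
 * consumer skb in the pool; the matching tpa_stop below unmaps it,
 * attaches the SGE pages as frags, hands the result to the stack and
 * puts a freshly allocated skb back in the bin.
 */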

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}
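
/* The three producers are published to the chip as one struct image:
 * the tstorm_eth_rx_producers layout is copied dword by dword into
 * TSTORM internal memory at the per-port, per-client offset, where
 * the firmware picks it up.
 */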
1366
1367 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1368 {
1369         struct bnx2x *bp = fp->bp;
1370         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1371         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1372         int rx_pkt = 0;
1373         u16 queue;
1374
1375 #ifdef BNX2X_STOP_ON_ERROR
1376         if (unlikely(bp->panic))
1377                 return 0;
1378 #endif
1379
1380         /* CQ "next element" is of the size of the regular element,
1381            that's why it's ok here */
1382         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1383         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1384                 hw_comp_cons++;
1385
1386         bd_cons = fp->rx_bd_cons;
1387         bd_prod = fp->rx_bd_prod;
1388         bd_prod_fw = bd_prod;
1389         sw_comp_cons = fp->rx_comp_cons;
1390         sw_comp_prod = fp->rx_comp_prod;
1391
1392         /* Memory barrier necessary as speculative reads of the rx
1393          * buffer can be ahead of the index in the status block
1394          */
1395         rmb();
1396
1397         DP(NETIF_MSG_RX_STATUS,
1398            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1399            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1400
1401         while (sw_comp_cons != hw_comp_cons) {
1402                 struct sw_rx_bd *rx_buf = NULL;
1403                 struct sk_buff *skb;
1404                 union eth_rx_cqe *cqe;
1405                 u8 cqe_fp_flags;
1406                 u16 len, pad;
1407
1408                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1409                 bd_prod = RX_BD(bd_prod);
1410                 bd_cons = RX_BD(bd_cons);
1411
1412                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1413                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1414
1415                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1416                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1417                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418                    cqe->fast_path_cqe.rss_hash_result,
1419                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1421
1422                 /* is this a slowpath msg? */
1423                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1424                         bnx2x_sp_event(fp, cqe);
1425                         goto next_cqe;
1426
1427                 /* this is an rx packet */
1428                 } else {
1429                         rx_buf = &fp->rx_buf_ring[bd_cons];
1430                         skb = rx_buf->skb;
1431                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1432                         pad = cqe->fast_path_cqe.placement_offset;
1433
1434                         /* If CQE is marked both TPA_START and TPA_END
1435                            it is a non-TPA CQE */
1436                         if ((!fp->disable_tpa) &&
1437                             (TPA_TYPE(cqe_fp_flags) !=
1438                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1439                                 queue = cqe->fast_path_cqe.queue_index;
1440
1441                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1442                                         DP(NETIF_MSG_RX_STATUS,
1443                                            "calling tpa_start on queue %d\n",
1444                                            queue);
1445
1446                                         bnx2x_tpa_start(fp, queue, skb,
1447                                                         bd_cons, bd_prod);
1448                                         goto next_rx;
1449                                 }
1450
1451                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1452                                         DP(NETIF_MSG_RX_STATUS,
1453                                            "calling tpa_stop on queue %d\n",
1454                                            queue);
1455
1456                                         if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear data
1461                                            on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                                        pad + RX_COPY_THRESH,
1480                                                        PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring,
1494                          * copy small packets if the MTU is above 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                                  bp->rx_buf_use_size,
1524                                                  PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559                 bp->dev->last_rx = jiffies;
1560
1561 next_rx:
1562                 rx_buf->skb = NULL;
1563
1564                 bd_cons = NEXT_RX_IDX(bd_cons);
1565                 bd_prod = NEXT_RX_IDX(bd_prod);
1566                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567                 rx_pkt++;
1568 next_cqe:
1569                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1571
1572                 if (rx_pkt == budget)
1573                         break;
1574         } /* while */
1575
1576         fp->rx_bd_cons = bd_cons;
1577         fp->rx_bd_prod = bd_prod_fw;
1578         fp->rx_comp_cons = sw_comp_cons;
1579         fp->rx_comp_prod = sw_comp_prod;
1580
1581         /* Update producers */
1582         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583                              fp->rx_sge_prod);
1584         mmiowb(); /* keep prod updates ordered */
1585
1586         fp->rx_pkt += rx_pkt;
1587         fp->rx_calls++;
1588
1589         return rx_pkt;
1590 }
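/* A schematic sketch of the budget contract seen by callers such as the
 * NAPI poll routine (defined elsewhere in this driver): bnx2x_rx_int()
 * returns the number of packets processed, which reaches the budget only
 * when the ring still has work pending.  Illustration only (compiled out):
 */
#if 0
static void example_rx_poll(struct bnx2x_fastpath *fp, int budget)
{
        int rx_done = bnx2x_rx_int(fp, budget);

        if (rx_done < budget) {
                /* ring drained - a real poll routine would re-enable
                   interrupts here */
        }
}
#endif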
1591
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593 {
1594         struct bnx2x_fastpath *fp = fp_cookie;
1595         struct bnx2x *bp = fp->bp;
1596         struct net_device *dev = bp->dev;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
1623
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638 #ifdef BNX2X_STOP_ON_ERROR
1639         if (unlikely(bp->panic))
1640                 return IRQ_HANDLED;
1641 #endif
1642
1643         /* Return here if interrupt is disabled */
1644         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1645                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1646                 return IRQ_HANDLED;
1647         }
1648
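        /* status is a bitmask: bit 0 indicates the default (slowpath)
         * status block, and each fastpath status block sets bit
         * (sb_id + 1) - hence the 0x2 << sb_id mask below
         */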
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1663
1664         if (unlikely(status & 0x1)) {
1665                 schedule_work(&bp->sp_task);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
1678
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 1 second, polling every 5ms */
1721         for (cnt = 0; cnt < 200; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
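/* A minimal usage sketch of the acquire/release pair above, guarding a
 * hypothetical shared-register access (illustration only, compiled out):
 */
#if 0
static int example_locked_access(struct bnx2x *bp, u32 resource)
{
        int rc = bnx2x_acquire_hw_lock(bp, resource);

        if (rc)
                return rc;      /* -EINVAL, -EEXIST or -EAGAIN */

        /* ... touch the resource shared with the other port/MCP ... */

        return bnx2x_release_hw_lock(bp, resource);
}
#endif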
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO and mask out everything except the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
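/* Usage example from this file: the fan-failure handler further below
 * drives the PHY reset line low via
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW);
 */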
1843
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO and mask out everything except the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 bp->link_params.mtu = bp->dev->mtu;
1950
1951                 bnx2x_acquire_phy_lock(bp);
1952                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1953                 bnx2x_release_phy_lock(bp);
1954
1955                 if (bp->link_vars.link_up)
1956                         bnx2x_link_report(bp);
1957
1958                 bnx2x_calc_fc_adv(bp);
1959
1960                 return rc;
1961         }
1962         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1963         return -EINVAL;
1964 }
1965
1966 static void bnx2x_link_set(struct bnx2x *bp)
1967 {
1968         if (!BP_NOMCP(bp)) {
1969                 bnx2x_acquire_phy_lock(bp);
1970                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1971                 bnx2x_release_phy_lock(bp);
1972
1973                 bnx2x_calc_fc_adv(bp);
1974         } else
1975                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1976 }
1977
1978 static void bnx2x__link_reset(struct bnx2x *bp)
1979 {
1980         if (!BP_NOMCP(bp)) {
1981                 bnx2x_acquire_phy_lock(bp);
1982                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1983                 bnx2x_release_phy_lock(bp);
1984         } else
1985                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1986 }
1987
1988 static u8 bnx2x_link_test(struct bnx2x *bp)
1989 {
1990         u8 rc;
1991
1992         bnx2x_acquire_phy_lock(bp);
1993         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1994         bnx2x_release_phy_lock(bp);
1995
1996         return rc;
1997 }
1998
1999 /* Calculates the sum of vn_min_rates.
2000    It's needed for further normalization of the min_rates.
2001
2002    Returns:
2003      sum of vn_min_rates
2004        or
2005      0 - if all the min_rates are 0.
2006      In the latter case the fairness algorithm should be deactivated.
2007      If not all min_rates are zero then those that are zero will
2008      be set to 1.
2009  */
2010 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2011 {
2012         int i, port = BP_PORT(bp);
2013         u32 wsum = 0;
2014         int all_zero = 1;
2015
2016         for (i = 0; i < E1HVN_MAX; i++) {
2017                 u32 vn_cfg =
2018                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2019                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2020                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2021                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2022                         /* If min rate is zero - set it to 1 */
2023                         if (!vn_min_rate)
2024                                 vn_min_rate = DEF_MIN_RATE;
2025                         else
2026                                 all_zero = 0;
2027
2028                         wsum += vn_min_rate;
2029                 }
2030         }
2031
2032         /* only if all min rates are zero - disable FAIRNESS */
2033         if (all_zero)
2034                 return 0;
2035
2036         return wsum;
2037 }
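/* Worked example: min-BW config fields of {0, 10, 20, 30} for the four
 * vns yield min rates {DEF_MIN_RATE, 1000, 2000, 3000} and a non-zero
 * wsum, so fairness stays enabled; only if all four fields were zero
 * would the function return 0 and fairness be deactivated.
 */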
2038
2039 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2040                                    int en_fness,
2041                                    u16 port_rate,
2042                                    struct cmng_struct_per_port *m_cmng_port)
2043 {
2044         u32 r_param = port_rate / 8;
2045         int port = BP_PORT(bp);
2046         int i;
2047
2048         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2049
2050         /* Enable minmax only if we are in e1hmf mode */
2051         if (IS_E1HMF(bp)) {
2052                 u32 fair_periodic_timeout_usec;
2053                 u32 t_fair;
2054
2055                 /* Enable rate shaping and fairness */
2056                 m_cmng_port->flags.cmng_vn_enable = 1;
2057                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2058                 m_cmng_port->flags.rate_shaping_enable = 1;
2059
2060                 if (!en_fness)
2061                         DP(NETIF_MSG_IFUP, "All MIN values are zero - "
2062                            "fairness will be disabled\n");
2063
2064                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2065                 m_cmng_port->rs_vars.rs_periodic_timeout =
2066                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2067
2068                 /* this is the threshold below which no timer arming will
2069                    occur; the 1.25 coefficient makes the threshold a bit
2070                    bigger than real time, to compensate for timer inaccuracy */
2071                 m_cmng_port->rs_vars.rs_threshold =
2072                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2073
2074                 /* resolution of fairness timer */
2075                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2076                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2077                 t_fair = T_FAIR_COEF / port_rate;
2078
2079                 /* this is the threshold below which we won't arm
2080                    the timer anymore */
2081                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2082
2083                 /* we multiply by 1e3/8 to get bytes/msec.
2084                    We don't want the credits to exceed
2085                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2086                 m_cmng_port->fair_vars.upper_bound =
2087                                                 r_param * t_fair * FAIR_MEM;
2088                 /* since each tick is 4 usec */
2089                 m_cmng_port->fair_vars.fairness_timeout =
2090                                                 fair_periodic_timeout_usec / 4;
2091
2092         } else {
2093                 /* Disable rate shaping and fairness */
2094                 m_cmng_port->flags.cmng_vn_enable = 0;
2095                 m_cmng_port->flags.fairness_enable = 0;
2096                 m_cmng_port->flags.rate_shaping_enable = 0;
2097
2098                 DP(NETIF_MSG_IFUP,
2099                    "Single function mode  minmax will be disabled\n");
2100         }
2101
2102         /* Store it to internal memory */
2103         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2104                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2105                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2106                        ((u32 *)(m_cmng_port))[i]);
2107 }
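/* Worked example for the 10G case (port_rate = 10000 Mbps), assuming
 * RS_PERIODIC_TIMEOUT_USEC is the 100 usec mentioned above:
 *   r_param      = 10000/8 = 1250 bytes/usec
 *   rs_threshold = (100 * 1250 * 5)/4 = 156250 bytes
 *   t_fair       = T_FAIR_COEF/10000 = 1000 usec (per the comment above)
 */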
2108
2109 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2110                                    u32 wsum, u16 port_rate,
2111                                  struct cmng_struct_per_port *m_cmng_port)
2112 {
2113         struct rate_shaping_vars_per_vn m_rs_vn;
2114         struct fairness_vars_per_vn m_fair_vn;
2115         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2116         u16 vn_min_rate, vn_max_rate;
2117         int i;
2118
2119         /* If the function is hidden - set min and max to zero */
2120         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2121                 vn_min_rate = 0;
2122                 vn_max_rate = 0;
2123
2124         } else {
2125                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2126                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2127                 /* If FAIRNESS is enabled (not all min rates are zero) and
2128                    if the current min rate is zero - set it to 1.
2129                    This is a requirement of the algorithm. */
2130                 if ((vn_min_rate == 0) && wsum)
2131                         vn_min_rate = DEF_MIN_RATE;
2132                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2133                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2134         }
2135
2136         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2137            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2138
2139         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2140         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2141
2142         /* global vn counter - maximal Mbps for this vn */
2143         m_rs_vn.vn_counter.rate = vn_max_rate;
2144
2145         /* quota - number of bytes transmitted in this period */
2146         m_rs_vn.vn_counter.quota =
2147                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2148
2149 #ifdef BNX2X_PER_PROT_QOS
2150         /* per protocol counter */
2151         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2152                 /* maximal Mbps for this protocol */
2153                 m_rs_vn.protocol_counters[protocol].rate =
2154                                                 protocol_max_rate[protocol];
2155                 /* the quota in each timer period -
2156                    number of bytes transmitted in this period */
2157                 m_rs_vn.protocol_counters[protocol].quota =
2158                         (u32)(rs_periodic_timeout_usec *
2159                           ((double)m_rs_vn.
2160                                    protocol_counters[protocol].rate/8));
2161         }
2162 #endif
2163
2164         if (wsum) {
2165                 /* credit for each period of the fairness algorithm:
2166                    number of bytes in T_FAIR (the vn's share of the port
2167                    rate).  wsum should not be larger than 10000, thus
2168                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2169                 m_fair_vn.vn_credit_delta =
2170                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2171                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2172                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2173                    m_fair_vn.vn_credit_delta);
2174         }
2175
2176 #ifdef BNX2X_PER_PROT_QOS
2177         do {
2178                 u32 protocolWeightSum = 0;
2179
2180                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2181                         protocolWeightSum +=
2182                                         drvInit.protocol_min_rate[protocol];
2183                 /* per protocol counter -
2184                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2185                 if (protocolWeightSum > 0) {
2186                         for (protocol = 0;
2187                              protocol < NUM_OF_PROTOCOLS; protocol++)
2188                                 /* credit for each period of the
2189                                    fairness algorithm - number of bytes in
2190                                    T_FAIR (the protocol share the vn rate) */
2191                                 m_fair_vn.protocol_credit_delta[protocol] =
2192                                         (u32)((vn_min_rate / 8) * t_fair *
2193                                         protocol_min_rate / protocolWeightSum);
2194                 }
2195         } while (0);
2196 #endif
2197
2198         /* Store it to internal memory */
2199         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2200                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2202                        ((u32 *)(&m_rs_vn))[i]);
2203
2204         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2205                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2206                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2207                        ((u32 *)(&m_fair_vn))[i]);
2208 }
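/* Note on vn_credit_delta above: since wsum is bounded by 10000, the
 * first max() argument stays positive, and the second argument
 * (fair_threshold * 2, i.e. QM_ARB_BYTES * 2) guarantees every vn at
 * least two arbitration quanta of credit per fairness period.
 */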
2209
2210 /* This function is called upon link interrupt */
2211 static void bnx2x_link_attn(struct bnx2x *bp)
2212 {
2213         int vn;
2214
2215         /* Make sure that we are synced with the current statistics */
2216         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2217
2218         bnx2x_acquire_phy_lock(bp);
2219         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2220         bnx2x_release_phy_lock(bp);
2221
2222         if (bp->link_vars.link_up) {
2223
2224                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2225                         struct host_port_stats *pstats;
2226
2227                         pstats = bnx2x_sp(bp, port_stats);
2228                         /* reset old bmac stats */
2229                         memset(&(pstats->mac_stx[0]), 0,
2230                                sizeof(struct mac_stx));
2231                 }
2232                 if ((bp->state == BNX2X_STATE_OPEN) ||
2233                     (bp->state == BNX2X_STATE_DISABLED))
2234                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2235         }
2236
2237         /* indicate link status */
2238         bnx2x_link_report(bp);
2239
2240         if (IS_E1HMF(bp)) {
2241                 int func;
2242
2243                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2244                         if (vn == BP_E1HVN(bp))
2245                                 continue;
2246
2247                         func = ((vn << 1) | BP_PORT(bp));
2248
2249                         /* Set the attention towards other drivers
2250                            on the same port */
2251                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2252                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2253                 }
2254         }
2255
2256         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2257                 struct cmng_struct_per_port m_cmng_port;
2258                 u32 wsum;
2259                 int port = BP_PORT(bp);
2260
2261                 /* Init RATE SHAPING and FAIRNESS contexts */
2262                 wsum = bnx2x_calc_vn_wsum(bp);
2263                 bnx2x_init_port_minmax(bp, (int)wsum,
2264                                         bp->link_vars.line_speed,
2265                                         &m_cmng_port);
2266                 if (IS_E1HMF(bp))
2267                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2268                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2269                                         wsum, bp->link_vars.line_speed,
2270                                                      &m_cmng_port);
2271         }
2272 }
2273
2274 static void bnx2x__link_status_update(struct bnx2x *bp)
2275 {
2276         if (bp->state != BNX2X_STATE_OPEN)
2277                 return;
2278
2279         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2280
2281         if (bp->link_vars.link_up)
2282                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2283         else
2284                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2285
2286         /* indicate link status */
2287         bnx2x_link_report(bp);
2288 }
2289
2290 static void bnx2x_pmf_update(struct bnx2x *bp)
2291 {
2292         int port = BP_PORT(bp);
2293         u32 val;
2294
2295         bp->port.pmf = 1;
2296         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2297
2298         /* enable nig attention */
2299         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2300         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2301         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2302
2303         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2304 }
2305
2306 /* end of Link */
2307
2308 /* slow path */
2309
2310 /*
2311  * General service functions
2312  */
2313
2314 /* the slow path queue is odd since completions arrive on the fastpath ring */
2315 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2316                          u32 data_hi, u32 data_lo, int common)
2317 {
2318         int func = BP_FUNC(bp);
2319
2320         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2321            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2322            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2323            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2324            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2325
2326 #ifdef BNX2X_STOP_ON_ERROR
2327         if (unlikely(bp->panic))
2328                 return -EIO;
2329 #endif
2330
2331         spin_lock_bh(&bp->spq_lock);
2332
2333         if (!bp->spq_left) {
2334                 BNX2X_ERR("BUG! SPQ ring full!\n");
2335                 spin_unlock_bh(&bp->spq_lock);
2336                 bnx2x_panic();
2337                 return -EBUSY;
2338         }
2339
2340         /* CID needs the port number to be encoded in it */
2341         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2342                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2343                                      HW_CID(bp, cid)));
2344         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2345         if (common)
2346                 bp->spq_prod_bd->hdr.type |=
2347                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2348
2349         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2350         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2351
2352         bp->spq_left--;
2353
2354         if (bp->spq_prod_bd == bp->spq_last_bd) {
2355                 bp->spq_prod_bd = bp->spq;
2356                 bp->spq_prod_idx = 0;
2357                 DP(NETIF_MSG_TIMER, "end of spq\n");
2358
2359         } else {
2360                 bp->spq_prod_bd++;
2361                 bp->spq_prod_idx++;
2362         }
2363
2364         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2365                bp->spq_prod_idx);
2366
2367         spin_unlock_bh(&bp->spq_lock);
2368         return 0;
2369 }
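/* A schematic sketch of posting a slowpath element via the helper above,
 * with a hypothetical command id standing in for the real ramrod ids used
 * elsewhere in this driver (illustration only, compiled out):
 */
#if 0
static int example_post_ramrod(struct bnx2x *bp, int cid,
                               dma_addr_t data_mapping)
{
        /* EXAMPLE_CMD_ID is hypothetical; real callers pass the ethernet
           ramrod command ids from the HSI headers */
        return bnx2x_sp_post(bp, EXAMPLE_CMD_ID, cid,
                             U64_HI(data_mapping), U64_LO(data_mapping), 0);
}
#endif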
2370
2371 /* acquire split MCP access lock register */
2372 static int bnx2x_acquire_alr(struct bnx2x *bp)
2373 {
2374         u32 i, j, val;
2375         int rc = 0;
2376
2377         might_sleep();
2378         i = 100;
2379         for (j = 0; j < i*10; j++) {
2380                 val = (1UL << 31);
2381                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2382                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2383                 if (val & (1UL << 31))
2384                         break;
2385
2386                 msleep(5);
2387         }
2388         if (!(val & (1UL << 31))) {
2389                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2390                 rc = -EBUSY;
2391         }
2392
2393         return rc;
2394 }
2395
2396 /* release split MCP access lock register */
2397 static void bnx2x_release_alr(struct bnx2x *bp)
2398 {
2399         u32 val = 0;
2400
2401         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2402 }
2403
2404 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2405 {
2406         struct host_def_status_block *def_sb = bp->def_status_blk;
2407         u16 rc = 0;
2408
2409         barrier(); /* status block is written to by the chip */
2410         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2411                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2412                 rc |= 1;
2413         }
2414         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2415                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2416                 rc |= 2;
2417         }
2418         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2419                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2420                 rc |= 4;
2421         }
2422         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2423                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2424                 rc |= 8;
2425         }
2426         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2427                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2428                 rc |= 16;
2429         }
2430         return rc;
2431 }
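/* The return value is consumed as a bitmask by bnx2x_sp_task() below:
 * bit 0 - attention bits changed, bit 1 - CStorm index, bit 2 - UStorm,
 * bit 3 - XStorm, bit 4 - TStorm.
 */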
2432
2433 /*
2434  * slow path service functions
2435  */
2436
2437 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2438 {
2439         int port = BP_PORT(bp);
2440         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2441                        COMMAND_REG_ATTN_BITS_SET);
2442         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2443                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2444         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2445                                        NIG_REG_MASK_INTERRUPT_PORT0;
2446         u32 aeu_mask;
2447
2448         if (bp->attn_state & asserted)
2449                 BNX2X_ERR("IGU ERROR\n");
2450
2451         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2452         aeu_mask = REG_RD(bp, aeu_addr);
2453
2454         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2455            aeu_mask, asserted);
2456         aeu_mask &= ~(asserted & 0xff);
2457         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2458
2459         REG_WR(bp, aeu_addr, aeu_mask);
2460         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2461
2462         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2463         bp->attn_state |= asserted;
2464         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2465
2466         if (asserted & ATTN_HARD_WIRED_MASK) {
2467                 if (asserted & ATTN_NIG_FOR_FUNC) {
2468
2469                         /* save nig interrupt mask */
2470                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2471                         REG_WR(bp, nig_int_mask_addr, 0);
2472
2473                         bnx2x_link_attn(bp);
2474
2475                         /* handle unicore attn? */
2476                 }
2477                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2478                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2479
2480                 if (asserted & GPIO_2_FUNC)
2481                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2482
2483                 if (asserted & GPIO_3_FUNC)
2484                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2485
2486                 if (asserted & GPIO_4_FUNC)
2487                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2488
2489                 if (port == 0) {
2490                         if (asserted & ATTN_GENERAL_ATTN_1) {
2491                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2492                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2493                         }
2494                         if (asserted & ATTN_GENERAL_ATTN_2) {
2495                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2496                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2497                         }
2498                         if (asserted & ATTN_GENERAL_ATTN_3) {
2499                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2500                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2501                         }
2502                 } else {
2503                         if (asserted & ATTN_GENERAL_ATTN_4) {
2504                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2505                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2506                         }
2507                         if (asserted & ATTN_GENERAL_ATTN_5) {
2508                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2509                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2510                         }
2511                         if (asserted & ATTN_GENERAL_ATTN_6) {
2512                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2513                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2514                         }
2515                 }
2516
2517         } /* if hardwired */
2518
2519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2520            asserted, hc_addr);
2521         REG_WR(bp, hc_addr, asserted);
2522
2523         /* now set back the mask */
2524         if (asserted & ATTN_NIG_FOR_FUNC)
2525                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2526 }
2527
2528 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2529 {
2530         int port = BP_PORT(bp);
2531         int reg_offset;
2532         u32 val;
2533
2534         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2535                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2536
2537         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2538
2539                 val = REG_RD(bp, reg_offset);
2540                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2541                 REG_WR(bp, reg_offset, val);
2542
2543                 BNX2X_ERR("SPIO5 hw attention\n");
2544
2545                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2546                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2547                         /* Fan failure attention */
2548
2549                         /* The PHY reset is controlled by GPIO 1 */
2550                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2551                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2552                         /* Low power mode is controlled by GPIO 2 */
2553                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2554                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2555                         /* mark the failure */
2556                         bp->link_params.ext_phy_config &=
2557                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2558                         bp->link_params.ext_phy_config |=
2559                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2560                         SHMEM_WR(bp,
2561                                  dev_info.port_hw_config[port].
2562                                                         external_phy_config,
2563                                  bp->link_params.ext_phy_config);
2564                         /* log the failure */
2565                         printk(KERN_ERR PFX "Fan Failure on Network"
2566                                " Controller %s has caused the driver to"
2567                                " shutdown the card to prevent permanent"
2568                                " damage.  Please contact Dell Support for"
2569                                " assistance\n", bp->dev->name);
2570                         break;
2571
2572                 default:
2573                         break;
2574                 }
2575         }
2576
2577         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2578
2579                 val = REG_RD(bp, reg_offset);
2580                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2581                 REG_WR(bp, reg_offset, val);
2582
2583                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2584                           (attn & HW_INTERRUT_ASSERT_SET_0));
2585                 bnx2x_panic();
2586         }
2587 }
2588
2589 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2590 {
2591         u32 val;
2592
2593         if (attn & BNX2X_DOORQ_ASSERT) {
2594
2595                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2596                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2597                 /* DORQ discard attention */
2598                 if (val & 0x2)
2599                         BNX2X_ERR("FATAL error from DORQ\n");
2600         }
2601
2602         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2603
2604                 int port = BP_PORT(bp);
2605                 int reg_offset;
2606
2607                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2608                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2609
2610                 val = REG_RD(bp, reg_offset);
2611                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2612                 REG_WR(bp, reg_offset, val);
2613
2614                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2615                           (attn & HW_INTERRUT_ASSERT_SET_1));
2616                 bnx2x_panic();
2617         }
2618 }
2619
2620 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2621 {
2622         u32 val;
2623
2624         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2625
2626                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2627                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2628                 /* CFC error attention */
2629                 if (val & 0x2)
2630                         BNX2X_ERR("FATAL error from CFC\n");
2631         }
2632
2633         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2634
2635                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2636                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2637                 /* RQ_USDMDP_FIFO_OVERFLOW */
2638                 if (val & 0x18000)
2639                         BNX2X_ERR("FATAL error from PXP\n");
2640         }
2641
2642         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2643
2644                 int port = BP_PORT(bp);
2645                 int reg_offset;
2646
2647                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2648                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2649
2650                 val = REG_RD(bp, reg_offset);
2651                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2652                 REG_WR(bp, reg_offset, val);
2653
2654                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2655                           (attn & HW_INTERRUT_ASSERT_SET_2));
2656                 bnx2x_panic();
2657         }
2658 }
2659
2660 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2661 {
2662         u32 val;
2663
2664         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2665
2666                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2667                         int func = BP_FUNC(bp);
2668
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2670                         bnx2x__link_status_update(bp);
2671                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2672                                                         DRV_STATUS_PMF)
2673                                 bnx2x_pmf_update(bp);
2674
2675                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2676
2677                         BNX2X_ERR("MC assert!\n");
2678                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2679                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2680                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2681                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2682                         bnx2x_panic();
2683
2684                 } else if (attn & BNX2X_MCP_ASSERT) {
2685
2686                         BNX2X_ERR("MCP assert!\n");
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2688                         bnx2x_fw_dump(bp);
2689
2690                 } else
2691                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2692         }
2693
2694         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2695                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2696                 if (attn & BNX2X_GRC_TIMEOUT) {
2697                         val = CHIP_IS_E1H(bp) ?
2698                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2699                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2700                 }
2701                 if (attn & BNX2X_GRC_RSV) {
2702                         val = CHIP_IS_E1H(bp) ?
2703                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2704                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2705                 }
2706                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2707         }
2708 }
2709
2710 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2711 {
2712         struct attn_route attn;
2713         struct attn_route group_mask;
2714         int port = BP_PORT(bp);
2715         int index;
2716         u32 reg_addr;
2717         u32 val;
2718         u32 aeu_mask;
2719
2720         /* need to take HW lock because MCP or other port might also
2721            try to handle this event */
2722         bnx2x_acquire_alr(bp);
2723
2724         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2725         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2726         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2727         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2728         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2729            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2730
2731         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2732                 if (deasserted & (1 << index)) {
2733                         group_mask = bp->attn_group[index];
2734
2735                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2736                            index, group_mask.sig[0], group_mask.sig[1],
2737                            group_mask.sig[2], group_mask.sig[3]);
2738
2739                         bnx2x_attn_int_deasserted3(bp,
2740                                         attn.sig[3] & group_mask.sig[3]);
2741                         bnx2x_attn_int_deasserted1(bp,
2742                                         attn.sig[1] & group_mask.sig[1]);
2743                         bnx2x_attn_int_deasserted2(bp,
2744                                         attn.sig[2] & group_mask.sig[2]);
2745                         bnx2x_attn_int_deasserted0(bp,
2746                                         attn.sig[0] & group_mask.sig[0]);
2747
2748                         if ((attn.sig[0] & group_mask.sig[0] &
2749                                                 HW_PRTY_ASSERT_SET_0) ||
2750                             (attn.sig[1] & group_mask.sig[1] &
2751                                                 HW_PRTY_ASSERT_SET_1) ||
2752                             (attn.sig[2] & group_mask.sig[2] &
2753                                                 HW_PRTY_ASSERT_SET_2))
2754                                BNX2X_ERR("FATAL HW block parity attention\n");
2755                 }
2756         }
2757
2758         bnx2x_release_alr(bp);
2759
2760         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2761
2762         val = ~deasserted;
2763         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2764            val, reg_addr);
2765         REG_WR(bp, reg_addr, val);
2766
2767         if (~bp->attn_state & deasserted)
2768                 BNX2X_ERR("IGU ERROR\n");
2769
2770         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2771                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2772
2773         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2774         aeu_mask = REG_RD(bp, reg_addr);
2775
2776         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2777            aeu_mask, deasserted);
2778         aeu_mask |= (deasserted & 0xff);
2779         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2780
2781         REG_WR(bp, reg_addr, aeu_mask);
2782         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2783
2784         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2785         bp->attn_state &= ~deasserted;
2786         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2787 }
2788
2789 static void bnx2x_attn_int(struct bnx2x *bp)
2790 {
2791         /* read local copy of bits */
2792         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2793         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2794         u32 attn_state = bp->attn_state;
2795
2796         /* look for changed bits */
2797         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2798         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2799
2800         DP(NETIF_MSG_HW,
2801            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2802            attn_bits, attn_ack, asserted, deasserted);
2803
2804         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2805                 BNX2X_ERR("BAD attention state\n");
2806
2807         /* handle bits that were raised */
2808         if (asserted)
2809                 bnx2x_attn_int_asserted(bp, asserted);
2810
2811         if (deasserted)
2812                 bnx2x_attn_int_deasserted(bp, deasserted);
2813 }
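/*
 * A worked example of the bookkeeping in bnx2x_attn_int(): with
 * attn_state = 0x1 (bit 0 recorded as asserted), attn_bits = 0x2 and
 * attn_ack = 0x1, bit 1 is newly raised (asserted = 0x2) and bit 0 is
 * newly cleared (deasserted = 0x1).  The "BAD attention state" check
 * fires when a bit the IGU reports as stable (attn_bits == attn_ack)
 * disagrees with the driver's recorded attn_state.
 */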
2814
2815 static void bnx2x_sp_task(struct work_struct *work)
2816 {
2817         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2818         u16 status;
2819
2820
2821         /* Return here if interrupt is disabled */
2822         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2823                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2824                 return;
2825         }
2826
2827         status = bnx2x_update_dsb_idx(bp);
2828 /*      if (status == 0)                                     */
2829 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2830
2831         DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2832
2833         /* HW attentions */
2834         if (status & 0x1)
2835                 bnx2x_attn_int(bp);
2836
2837         /* CStorm events: query_stats, port delete ramrod */
2838         if (status & 0x2)
2839                 bp->stats_pending = 0;
2840
2841         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2842                      IGU_INT_NOP, 1);
2843         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2844                      IGU_INT_NOP, 1);
2845         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2846                      IGU_INT_NOP, 1);
2847         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2848                      IGU_INT_NOP, 1);
2849         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2850                      IGU_INT_ENABLE, 1);
2851
2852 }
2853
2854 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2855 {
2856         struct net_device *dev = dev_instance;
2857         struct bnx2x *bp = netdev_priv(dev);
2858
2859         /* Return here if interrupt is disabled */
2860         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2861                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2862                 return IRQ_HANDLED;
2863         }
2864
2865         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2866
2867 #ifdef BNX2X_STOP_ON_ERROR
2868         if (unlikely(bp->panic))
2869                 return IRQ_HANDLED;
2870 #endif
2871
2872         schedule_work(&bp->sp_task);
2873
2874         return IRQ_HANDLED;
2875 }
2876
2877 /* end of slow path */
2878
2879 /* Statistics */
2880
2881 /****************************************************************************
2882 * Macros
2883 ****************************************************************************/
2884
2885 /* sum[hi:lo] += add[hi:lo] */
2886 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2887         do { \
2888                 s_lo += a_lo; \
2889                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2890         } while (0)
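/*
 * Example: adding a = 0x0:0x1 to sum = 0x0:0xffffffff makes s_lo wrap
 * to 0; the (s_lo < a_lo) test then supplies the carry and the result
 * is 0x1:0x0 - 64-bit addition composed from two 32-bit halves.
 */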
2891
2892 /* difference = minuend - subtrahend */
2893 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2894         do { \
2895                 if (m_lo < s_lo) { \
2896                         /* underflow */ \
2897                         d_hi = m_hi - s_hi; \
2898                         if (d_hi > 0) { \
2899                         /* we can borrow 1 */ \
2900                                 d_hi--; \
2901                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2902                         } else { \
2903                         /* m_hi <= s_hi */ \
2904                                 d_hi = 0; \
2905                                 d_lo = 0; \
2906                         } \
2907                 } else { \
2908                         /* m_lo >= s_lo */ \
2909                         if (m_hi < s_hi) { \
2910                                 d_hi = 0; \
2911                                 d_lo = 0; \
2912                         } else { \
2913                         /* m_hi >= s_hi */ \
2914                                 d_hi = m_hi - s_hi; \
2915                                 d_lo = m_lo - s_lo; \
2916                         } \
2917                 } \
2918         } while (0)
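/*
 * Example: 0x1:0x0 minus 0x0:0x1 takes the underflow path: d_hi
 * becomes 1 - 0 - 1 = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * the correct 64-bit difference.  If the subtrahend exceeds the
 * minuend, the result is clamped to 0:0 instead of wrapping.
 */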
2919
2920 #define UPDATE_STAT64(s, t) \
2921         do { \
2922                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2923                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2924                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2925                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2926                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2927                        pstats->mac_stx[1].t##_lo, diff.lo); \
2928         } while (0)
2929
2930 #define UPDATE_STAT64_NIG(s, t) \
2931         do { \
2932                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2933                         diff.lo, new->s##_lo, old->s##_lo); \
2934                 ADD_64(estats->t##_hi, diff.hi, \
2935                        estats->t##_lo, diff.lo); \
2936         } while (0)
2937
2938 /* sum[hi:lo] += add */
2939 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2940         do { \
2941                 s_lo += a; \
2942                 s_hi += (s_lo < a) ? 1 : 0; \
2943         } while (0)
2944
2945 #define UPDATE_EXTEND_STAT(s) \
2946         do { \
2947                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2948                               pstats->mac_stx[1].s##_lo, \
2949                               new->s); \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_TSTAT(s, t) \
2953         do { \
2954                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2955                 old_tclient->s = le32_to_cpu(tclient->s); \
2956                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2957         } while (0)
2958
2959 #define UPDATE_EXTEND_XSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2962                 old_xclient->s = le32_to_cpu(xclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
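/*
 * The UPDATE_EXTEND_* macros fold deltas of free-running 32-bit
 * firmware counters into 64-bit host totals.  Since the delta is
 * computed in unsigned 32-bit arithmetic, a counter wrap is handled
 * for free: old = 0xfffffffe, new = 0x00000003 yields diff = 5,
 * which ADD_EXTEND_64 then adds with carry.
 */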
2965
2966 /*
2967  * General service functions
2968  */
2969
2970 static inline long bnx2x_hilo(u32 *hiref)
2971 {
2972         u32 lo = *(hiref + 1);
2973 #if (BITS_PER_LONG == 64)
2974         u32 hi = *hiref;
2975
2976         return HILO_U64(hi, lo);
2977 #else
2978         return lo;
2979 #endif
2980 }
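/*
 * Example: statistics are kept as {hi, lo} pairs of u32 in host order,
 * so for u32 cnt[2] = { 0x2, 0x5 }, bnx2x_hilo(cnt) returns
 * 0x200000005 on a 64-bit kernel; a 32-bit kernel reports only the
 * low word (0x5), since the net_device_stats fields are longs.
 */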
2981
2982 /*
2983  * Init service functions
2984  */
2985
2986 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2987 {
2988         if (!bp->stats_pending) {
2989                 struct eth_query_ramrod_data ramrod_data = {0};
2990                 int rc;
2991
2992                 ramrod_data.drv_counter = bp->stats_counter++;
2993                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2994                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2995
2996                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2997                                    ((u32 *)&ramrod_data)[1],
2998                                    ((u32 *)&ramrod_data)[0], 0);
2999                 if (rc == 0) {
3000                         /* stats ramrod has its own slot on the spq */
3001                         bp->spq_left++;
3002                         bp->stats_pending = 1;
3003                 }
3004         }
3005 }
3006
3007 static void bnx2x_stats_init(struct bnx2x *bp)
3008 {
3009         int port = BP_PORT(bp);
3010
3011         bp->executer_idx = 0;
3012         bp->stats_counter = 0;
3013
3014         /* port stats */
3015         if (!BP_NOMCP(bp))
3016                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3017         else
3018                 bp->port.port_stx = 0;
3019         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3020
3021         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3022         bp->port.old_nig_stats.brb_discard =
3023                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3024         bp->port.old_nig_stats.brb_truncate =
3025                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3026         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3027                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3028         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3029                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3030
3031         /* function stats */
3032         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3033         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3034         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3035         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3036
3037         bp->stats_state = STATS_STATE_DISABLED;
3038         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3039                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3040 }
3041
3042 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3043 {
3044         struct dmae_command *dmae = &bp->stats_dmae;
3045         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3046
3047         *stats_comp = DMAE_COMP_VAL;
3048
3049         /* loader */
3050         if (bp->executer_idx) {
3051                 int loader_idx = PMF_DMAE_C(bp);
3052
3053                 memset(dmae, 0, sizeof(struct dmae_command));
3054
3055                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3056                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3057                                 DMAE_CMD_DST_RESET |
3058 #ifdef __BIG_ENDIAN
3059                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3060 #else
3061                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3062 #endif
3063                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3064                                                DMAE_CMD_PORT_0) |
3065                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3066                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3067                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3068                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3069                                      sizeof(struct dmae_command) *
3070                                      (loader_idx + 1)) >> 2;
3071                 dmae->dst_addr_hi = 0;
3072                 dmae->len = sizeof(struct dmae_command) >> 2;
3073                 if (CHIP_IS_E1(bp))
3074                         dmae->len--;
3075                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3076                 dmae->comp_addr_hi = 0;
3077                 dmae->comp_val = 1;
3078
3079                 *stats_comp = 0;
3080                 bnx2x_post_dmae(bp, dmae, loader_idx);
3081
3082         } else if (bp->func_stx) {
3083                 *stats_comp = 0;
3084                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3085         }
3086 }
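/*
 * A sketch of how the loader chain above appears to work: the commands
 * prepared in bnx2x_sp(bp, dmae[]) each complete by writing 1 to the
 * GO register of DMAE channel loader_idx, re-running this loader.
 * Because the loader's opcode sets DMAE_CMD_DST_RESET but not
 * DMAE_CMD_SRC_RESET, its source address keeps advancing through the
 * host-side dmae[] array while the destination stays at command-memory
 * slot loader_idx + 1, so each pass loads and kicks the next command.
 * Only the final command writes DMAE_COMP_VAL to stats_comp, which
 * bnx2x_stats_comp() polls.
 */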
3087
3088 static int bnx2x_stats_comp(struct bnx2x *bp)
3089 {
3090         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091         int cnt = 10;
3092
3093         might_sleep();
3094         while (*stats_comp != DMAE_COMP_VAL) {
3095                 if (!cnt) {
3096                         BNX2X_ERR("timeout waiting for stats to finish\n");
3097                         break;
3098                 }
3099                 cnt--;
3100                 msleep(1);
3101         }
3102         return 1;
3103 }
3104
3105 /*
3106  * Statistics service functions
3107  */
3108
3109 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3110 {
3111         struct dmae_command *dmae;
3112         u32 opcode;
3113         int loader_idx = PMF_DMAE_C(bp);
3114         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3115
3116         /* sanity */
3117         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3118                 BNX2X_ERR("BUG!\n");
3119                 return;
3120         }
3121
3122         bp->executer_idx = 0;
3123
3124         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3125                   DMAE_CMD_C_ENABLE |
3126                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3127 #ifdef __BIG_ENDIAN
3128                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3129 #else
3130                   DMAE_CMD_ENDIANITY_DW_SWAP |
3131 #endif
3132                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3133                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3134
3135         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3137         dmae->src_addr_lo = bp->port.port_stx >> 2;
3138         dmae->src_addr_hi = 0;
3139         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3140         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3141         dmae->len = DMAE_LEN32_RD_MAX;
3142         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3143         dmae->comp_addr_hi = 0;
3144         dmae->comp_val = 1;
3145
3146         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3147         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3148         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3149         dmae->src_addr_hi = 0;
3150         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3151                                    DMAE_LEN32_RD_MAX * 4);
3152         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3153                                    DMAE_LEN32_RD_MAX * 4);
3154         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3155         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3156         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3157         dmae->comp_val = DMAE_COMP_VAL;
3158
3159         *stats_comp = 0;
3160         bnx2x_hw_stats_post(bp);
3161         bnx2x_stats_comp(bp);
3162 }
3163
3164 static void bnx2x_port_stats_init(struct bnx2x *bp)
3165 {
3166         struct dmae_command *dmae;
3167         int port = BP_PORT(bp);
3168         int vn = BP_E1HVN(bp);
3169         u32 opcode;
3170         int loader_idx = PMF_DMAE_C(bp);
3171         u32 mac_addr;
3172         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3173
3174         /* sanity */
3175         if (!bp->link_vars.link_up || !bp->port.pmf) {
3176                 BNX2X_ERR("BUG!\n");
3177                 return;
3178         }
3179
3180         bp->executer_idx = 0;
3181
3182         /* MCP */
3183         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3184                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3185                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3186 #ifdef __BIG_ENDIAN
3187                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3188 #else
3189                   DMAE_CMD_ENDIANITY_DW_SWAP |
3190 #endif
3191                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3192                   (vn << DMAE_CMD_E1HVN_SHIFT));
3193
3194         if (bp->port.port_stx) {
3195
3196                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3197                 dmae->opcode = opcode;
3198                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3199                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3200                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3201                 dmae->dst_addr_hi = 0;
3202                 dmae->len = sizeof(struct host_port_stats) >> 2;
3203                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3204                 dmae->comp_addr_hi = 0;
3205                 dmae->comp_val = 1;
3206         }
3207
3208         if (bp->func_stx) {
3209
3210                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3211                 dmae->opcode = opcode;
3212                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3213                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3214                 dmae->dst_addr_lo = bp->func_stx >> 2;
3215                 dmae->dst_addr_hi = 0;
3216                 dmae->len = sizeof(struct host_func_stats) >> 2;
3217                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3218                 dmae->comp_addr_hi = 0;
3219                 dmae->comp_val = 1;
3220         }
3221
3222         /* MAC */
3223         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3224                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3225                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3226 #ifdef __BIG_ENDIAN
3227                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3228 #else
3229                   DMAE_CMD_ENDIANITY_DW_SWAP |
3230 #endif
3231                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3232                   (vn << DMAE_CMD_E1HVN_SHIFT));
3233
3234         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3235
3236                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3237                                    NIG_REG_INGRESS_BMAC0_MEM);
3238
3239                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3240                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3241                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242                 dmae->opcode = opcode;
3243                 dmae->src_addr_lo = (mac_addr +
3244                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3245                 dmae->src_addr_hi = 0;
3246                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3247                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3248                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3249                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3250                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3251                 dmae->comp_addr_hi = 0;
3252                 dmae->comp_val = 1;
3253
3254                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3255                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3256                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257                 dmae->opcode = opcode;
3258                 dmae->src_addr_lo = (mac_addr +
3259                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3260                 dmae->src_addr_hi = 0;
3261                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3262                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3263                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3264                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3265                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3266                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3268                 dmae->comp_addr_hi = 0;
3269                 dmae->comp_val = 1;
3270
3271         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3272
3273                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3274
3275                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3276                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3277                 dmae->opcode = opcode;
3278                 dmae->src_addr_lo = (mac_addr +
3279                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3280                 dmae->src_addr_hi = 0;
3281                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3282                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3283                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3284                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3285                 dmae->comp_addr_hi = 0;
3286                 dmae->comp_val = 1;
3287
3288                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3289                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3290                 dmae->opcode = opcode;
3291                 dmae->src_addr_lo = (mac_addr +
3292                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3293                 dmae->src_addr_hi = 0;
3294                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3295                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3296                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3297                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3298                 dmae->len = 1;
3299                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3300                 dmae->comp_addr_hi = 0;
3301                 dmae->comp_val = 1;
3302
3303                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3304                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305                 dmae->opcode = opcode;
3306                 dmae->src_addr_lo = (mac_addr +
3307                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3308                 dmae->src_addr_hi = 0;
3309                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3310                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3311                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3312                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3313                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3314                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315                 dmae->comp_addr_hi = 0;
3316                 dmae->comp_val = 1;
3317         }
3318
3319         /* NIG */
3320         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321         dmae->opcode = opcode;
3322         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3323                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3324         dmae->src_addr_hi = 0;
3325         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3326         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3327         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3328         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329         dmae->comp_addr_hi = 0;
3330         dmae->comp_val = 1;
3331
3332         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3333         dmae->opcode = opcode;
3334         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3335                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3336         dmae->src_addr_hi = 0;
3337         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3338                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3339         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3340                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3341         dmae->len = (2*sizeof(u32)) >> 2;
3342         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3343         dmae->comp_addr_hi = 0;
3344         dmae->comp_val = 1;
3345
3346         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3348                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3349                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3350 #ifdef __BIG_ENDIAN
3351                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3352 #else
3353                         DMAE_CMD_ENDIANITY_DW_SWAP |
3354 #endif
3355                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3356                         (vn << DMAE_CMD_E1HVN_SHIFT));
3357         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3358                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3359         dmae->src_addr_hi = 0;
3360         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3361                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3362         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3363                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3364         dmae->len = (2*sizeof(u32)) >> 2;
3365         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3366         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3367         dmae->comp_val = DMAE_COMP_VAL;
3368
3369         *stats_comp = 0;
3370 }
3371
3372 static void bnx2x_func_stats_init(struct bnx2x *bp)
3373 {
3374         struct dmae_command *dmae = &bp->stats_dmae;
3375         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3376
3377         /* sanity */
3378         if (!bp->func_stx) {
3379                 BNX2X_ERR("BUG!\n");
3380                 return;
3381         }
3382
3383         bp->executer_idx = 0;
3384         memset(dmae, 0, sizeof(struct dmae_command));
3385
3386         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3387                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3388                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3389 #ifdef __BIG_ENDIAN
3390                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3391 #else
3392                         DMAE_CMD_ENDIANITY_DW_SWAP |
3393 #endif
3394                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3395                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3396         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3397         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3398         dmae->dst_addr_lo = bp->func_stx >> 2;
3399         dmae->dst_addr_hi = 0;
3400         dmae->len = sizeof(struct host_func_stats) >> 2;
3401         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3402         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3403         dmae->comp_val = DMAE_COMP_VAL;
3404
3405         *stats_comp = 0;
3406 }
3407
3408 static void bnx2x_stats_start(struct bnx2x *bp)
3409 {
3410         if (bp->port.pmf)
3411                 bnx2x_port_stats_init(bp);
3412
3413         else if (bp->func_stx)
3414                 bnx2x_func_stats_init(bp);
3415
3416         bnx2x_hw_stats_post(bp);
3417         bnx2x_storm_stats_post(bp);
3418 }
3419
3420 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3421 {
3422         bnx2x_stats_comp(bp);
3423         bnx2x_stats_pmf_update(bp);
3424         bnx2x_stats_start(bp);
3425 }
3426
3427 static void bnx2x_stats_restart(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_start(bp);
3431 }
3432
3433 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3434 {
3435         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3436         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3437         struct regpair diff;
3438
3439         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3440         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3441         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3442         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3443         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3444         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3445         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3446         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3447         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3448         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3449         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3450         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3451         UPDATE_STAT64(tx_stat_gt127,
3452                                 tx_stat_etherstatspkts65octetsto127octets);
3453         UPDATE_STAT64(tx_stat_gt255,
3454                                 tx_stat_etherstatspkts128octetsto255octets);
3455         UPDATE_STAT64(tx_stat_gt511,
3456                                 tx_stat_etherstatspkts256octetsto511octets);
3457         UPDATE_STAT64(tx_stat_gt1023,
3458                                 tx_stat_etherstatspkts512octetsto1023octets);
3459         UPDATE_STAT64(tx_stat_gt1518,
3460                                 tx_stat_etherstatspkts1024octetsto1522octets);
3461         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3462         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3463         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3464         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3465         UPDATE_STAT64(tx_stat_gterr,
3466                                 tx_stat_dot3statsinternalmactransmiterrors);
3467         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3468 }
3469
3470 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3471 {
3472         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3474
3475         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3476         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3477         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3478         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3479         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3480         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3481         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3482         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3483         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3484         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3485         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3486         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3487         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3488         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3489         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3490         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3491         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3492         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3493         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3494         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3495         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3496         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3497         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3498         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3501         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3502         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3503         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3504         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3505         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3506 }
3507
3508 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3509 {
3510         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3511         struct nig_stats *old = &(bp->port.old_nig_stats);
3512         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3513         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3514         struct regpair diff;
3515
3516         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3517                 bnx2x_bmac_stats_update(bp);
3518
3519         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3520                 bnx2x_emac_stats_update(bp);
3521
3522         else { /* should not happen */
3523                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3524                 return -1;
3525         }
3526
3527         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3528                       new->brb_discard - old->brb_discard);
3529         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3530                       new->brb_truncate - old->brb_truncate);
3531
3532         UPDATE_STAT64_NIG(egress_mac_pkt0,
3533                                         etherstatspkts1024octetsto1522octets);
3534         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3535
3536         memcpy(old, new, sizeof(struct nig_stats));
3537
3538         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3539                sizeof(struct mac_stx));
3540         estats->brb_drop_hi = pstats->brb_drop_hi;
3541         estats->brb_drop_lo = pstats->brb_drop_lo;
3542
3543         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3544
3545         return 0;
3546 }
3547
3548 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3549 {
3550         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3551         int cl_id = BP_CL_ID(bp);
3552         struct tstorm_per_port_stats *tport =
3553                                 &stats->tstorm_common.port_statistics;
3554         struct tstorm_per_client_stats *tclient =
3555                         &stats->tstorm_common.client_statistics[cl_id];
3556         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3557         struct xstorm_per_client_stats *xclient =
3558                         &stats->xstorm_common.client_statistics[cl_id];
3559         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3560         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3561         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3562         u32 diff;
3563
3564         /* are storm stats valid? */
3565         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3566                                                         bp->stats_counter) {
3567                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3568                    "  tstorm counter (%d) != stats_counter (%d)\n",
3569                    tclient->stats_counter, bp->stats_counter);
3570                 return -1;
3571         }
3572         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3575                    "  xstorm counter (%d) != stats_counter (%d)\n",
3576                    xclient->stats_counter, bp->stats_counter);
3577                 return -2;
3578         }
3579
3580         fstats->total_bytes_received_hi =
3581         fstats->valid_bytes_received_hi =
3582                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3583         fstats->total_bytes_received_lo =
3584         fstats->valid_bytes_received_lo =
3585                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3586
3587         estats->error_bytes_received_hi =
3588                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3589         estats->error_bytes_received_lo =
3590                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3591         ADD_64(estats->error_bytes_received_hi,
3592                estats->rx_stat_ifhcinbadoctets_hi,
3593                estats->error_bytes_received_lo,
3594                estats->rx_stat_ifhcinbadoctets_lo);
3595
3596         ADD_64(fstats->total_bytes_received_hi,
3597                estats->error_bytes_received_hi,
3598                fstats->total_bytes_received_lo,
3599                estats->error_bytes_received_lo);
3600
3601         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3602         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3603                                 total_multicast_packets_received);
3604         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3605                                 total_broadcast_packets_received);
3606
3607         fstats->total_bytes_transmitted_hi =
3608                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3609         fstats->total_bytes_transmitted_lo =
3610                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3611
3612         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3613                                 total_unicast_packets_transmitted);
3614         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3615                                 total_multicast_packets_transmitted);
3616         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3617                                 total_broadcast_packets_transmitted);
3618
3619         memcpy(estats, &(fstats->total_bytes_received_hi),
3620                sizeof(struct host_func_stats) - 2*sizeof(u32));
3621
3622         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3623         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3624         estats->brb_truncate_discard =
3625                                 le32_to_cpu(tport->brb_truncate_discard);
3626         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3627
3628         old_tclient->rcv_unicast_bytes.hi =
3629                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3630         old_tclient->rcv_unicast_bytes.lo =
3631                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3632         old_tclient->rcv_broadcast_bytes.hi =
3633                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3634         old_tclient->rcv_broadcast_bytes.lo =
3635                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3636         old_tclient->rcv_multicast_bytes.hi =
3637                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3638         old_tclient->rcv_multicast_bytes.lo =
3639                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3640         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3641
3642         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3643         old_tclient->packets_too_big_discard =
3644                                 le32_to_cpu(tclient->packets_too_big_discard);
3645         estats->no_buff_discard =
3646         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3647         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3648
3649         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3650         old_xclient->unicast_bytes_sent.hi =
3651                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3652         old_xclient->unicast_bytes_sent.lo =
3653                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3654         old_xclient->multicast_bytes_sent.hi =
3655                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3656         old_xclient->multicast_bytes_sent.lo =
3657                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3658         old_xclient->broadcast_bytes_sent.hi =
3659                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3660         old_xclient->broadcast_bytes_sent.lo =
3661                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3662
3663         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3664
3665         return 0;
3666 }
3667
3668 static void bnx2x_net_stats_update(struct bnx2x *bp)
3669 {
3670         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3671         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3672         struct net_device_stats *nstats = &bp->dev->stats;
3673
3674         nstats->rx_packets =
3675                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3676                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3677                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3678
3679         nstats->tx_packets =
3680                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3681                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3682                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3683
3684         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3685
3686         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3687
3688         nstats->rx_dropped = old_tclient->checksum_discard +
3689                              estats->mac_discard;
3690         nstats->tx_dropped = 0;
3691
3692         nstats->multicast =
3693                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3694
3695         nstats->collisions =
3696                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3697                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3698                         estats->tx_stat_dot3statslatecollisions_lo +
3699                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3700
3701         estats->jabber_packets_received =
3702                                 old_tclient->packets_too_big_discard +
3703                                 estats->rx_stat_dot3statsframestoolong_lo;
3704
3705         nstats->rx_length_errors =
3706                                 estats->rx_stat_etherstatsundersizepkts_lo +
3707                                 estats->jabber_packets_received;
3708         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3709         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3710         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3711         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3712         nstats->rx_missed_errors = estats->xxoverflow_discard;
3713
3714         nstats->rx_errors = nstats->rx_length_errors +
3715                             nstats->rx_over_errors +
3716                             nstats->rx_crc_errors +
3717                             nstats->rx_frame_errors +
3718                             nstats->rx_fifo_errors +
3719                             nstats->rx_missed_errors;
3720
3721         nstats->tx_aborted_errors =
3722                         estats->tx_stat_dot3statslatecollisions_lo +
3723                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3724         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3725         nstats->tx_fifo_errors = 0;
3726         nstats->tx_heartbeat_errors = 0;
3727         nstats->tx_window_errors = 0;
3728
3729         nstats->tx_errors = nstats->tx_aborted_errors +
3730                             nstats->tx_carrier_errors;
3731 }
3732
3733 static void bnx2x_stats_update(struct bnx2x *bp)
3734 {
3735         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3736         int update = 0;
3737
3738         if (*stats_comp != DMAE_COMP_VAL)
3739                 return;
3740
3741         if (bp->port.pmf)
3742                 update = (bnx2x_hw_stats_update(bp) == 0);
3743
3744         update |= (bnx2x_storm_stats_update(bp) == 0);
3745
3746         if (update)
3747                 bnx2x_net_stats_update(bp);
3748
3749         else {
3750                 if (bp->stats_pending) {
3751                         bp->stats_pending++;
3752                         if (bp->stats_pending == 3) {
3753                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3754                                 bnx2x_panic();
3755                                 return;
3756                         }
3757                 }
3758         }
3759
3760         if (bp->msglevel & NETIF_MSG_TIMER) {
3761                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3762                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3763                 struct net_device_stats *nstats = &bp->dev->stats;
3764                 int i;
3765
3766                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3767                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3768                                   "  tx pkt (%lx)\n",
3769                        bnx2x_tx_avail(bp->fp),
3770                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3771                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3772                                   "  rx pkt (%lx)\n",
3773                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3774                              bp->fp->rx_comp_cons),
3775                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3776                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3777                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3778                        estats->driver_xoff, estats->brb_drop_lo);
3779                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3780                         "packets_too_big_discard %u  no_buff_discard %u  "
3781                         "mac_discard %u  mac_filter_discard %u  "
3782                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3783                         "ttl0_discard %u\n",
3784                        old_tclient->checksum_discard,
3785                        old_tclient->packets_too_big_discard,
3786                        old_tclient->no_buff_discard, estats->mac_discard,
3787                        estats->mac_filter_discard, estats->xxoverflow_discard,
3788                        estats->brb_truncate_discard,
3789                        old_tclient->ttl0_discard);
3790
3791                 for_each_queue(bp, i) {
3792                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3793                                bnx2x_fp(bp, i, tx_pkt),
3794                                bnx2x_fp(bp, i, rx_pkt),
3795                                bnx2x_fp(bp, i, rx_calls));
3796                 }
3797         }
3798
3799         bnx2x_hw_stats_post(bp);
3800         bnx2x_storm_stats_post(bp);
3801 }
3802
3803 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3804 {
3805         struct dmae_command *dmae;
3806         u32 opcode;
3807         int loader_idx = PMF_DMAE_C(bp);
3808         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3809
3810         bp->executer_idx = 0;
3811
3812         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3813                   DMAE_CMD_C_ENABLE |
3814                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3815 #ifdef __BIG_ENDIAN
3816                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3817 #else
3818                   DMAE_CMD_ENDIANITY_DW_SWAP |
3819 #endif
3820                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3821                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3822
3823         if (bp->port.port_stx) {
3824
3825                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3826                 if (bp->func_stx)
3827                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3828                 else
3829                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3830                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3831                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3832                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3833                 dmae->dst_addr_hi = 0;
3834                 dmae->len = sizeof(struct host_port_stats) >> 2;
3835                 if (bp->func_stx) {
3836                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3837                         dmae->comp_addr_hi = 0;
3838                         dmae->comp_val = 1;
3839                 } else {
3840                         dmae->comp_addr_lo =
3841                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3842                         dmae->comp_addr_hi =
3843                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3844                         dmae->comp_val = DMAE_COMP_VAL;
3845
3846                         *stats_comp = 0;
3847                 }
3848         }
3849
3850         if (bp->func_stx) {
3851
3852                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3853                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3854                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3855                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3856                 dmae->dst_addr_lo = bp->func_stx >> 2;
3857                 dmae->dst_addr_hi = 0;
3858                 dmae->len = sizeof(struct host_func_stats) >> 2;
3859                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3860                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3861                 dmae->comp_val = DMAE_COMP_VAL;
3862
3863                 *stats_comp = 0;
3864         }
3865 }
3866
3867 static void bnx2x_stats_stop(struct bnx2x *bp)
3868 {
3869         int update = 0;
3870
3871         bnx2x_stats_comp(bp);
3872
3873         if (bp->port.pmf)
3874                 update = (bnx2x_hw_stats_update(bp) == 0);
3875
3876         update |= (bnx2x_storm_stats_update(bp) == 0);
3877
3878         if (update) {
3879                 bnx2x_net_stats_update(bp);
3880
3881                 if (bp->port.pmf)
3882                         bnx2x_port_stats_stop(bp);
3883
3884                 bnx2x_hw_stats_post(bp);
3885                 bnx2x_stats_comp(bp);
3886         }
3887 }
3888
3889 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3890 {
3891 }
3892
3893 static const struct {
3894         void (*action)(struct bnx2x *bp);
3895         enum bnx2x_stats_state next_state;
3896 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3897 /* state        event   */
3898 {
3899 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3900 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3901 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3902 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3903 },
3904 {
3905 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3906 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3907 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3908 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3909 }
3910 };
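/*
 * Example walk through the table: starting DISABLED, a
 * STATS_EVENT_LINK_UP runs bnx2x_stats_start() and moves the machine
 * to ENABLED; each STATS_EVENT_UPDATE from bnx2x_timer() then runs
 * bnx2x_stats_update() and stays in ENABLED, until STATS_EVENT_STOP
 * runs bnx2x_stats_stop() and drops back to DISABLED.
 */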
3911
3912 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3913 {
3914         enum bnx2x_stats_state state = bp->stats_state;
3915
3916         bnx2x_stats_stm[state][event].action(bp);
3917         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3918
3919         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3920                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3921                    state, event, bp->stats_state);
3922 }
3923
3924 static void bnx2x_timer(unsigned long data)
3925 {
3926         struct bnx2x *bp = (struct bnx2x *) data;
3927
3928         if (!netif_running(bp->dev))
3929                 return;
3930
3931         if (atomic_read(&bp->intr_sem) != 0)
3932                 goto timer_restart;
3933
3934         if (poll) {
3935                 struct bnx2x_fastpath *fp = &bp->fp[0];
3936                 int rc;
3937
3938                 bnx2x_tx_int(fp, 1000);
3939                 rc = bnx2x_rx_int(fp, 1000);
3940         }
3941
3942         if (!BP_NOMCP(bp)) {
3943                 int func = BP_FUNC(bp);
3944                 u32 drv_pulse;
3945                 u32 mcp_pulse;
3946
3947                 ++bp->fw_drv_pulse_wr_seq;
3948                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3949                 /* TBD - add SYSTEM_TIME */
3950                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3951                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3952
3953                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3954                              MCP_PULSE_SEQ_MASK);
3955                 /* The delta between driver pulse and mcp response
3956                  * should be 1 (before mcp response) or 0 (after mcp response)
3957                  */
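                /* Example: if the driver just wrote pulse sequence 0x12,
                 * an mcp_pulse of 0x12 (delta 0) or 0x11 (delta 1) is
                 * healthy; any other masked distance means a missed
                 * heartbeat and is logged below.
                 */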
3958                 if ((drv_pulse != mcp_pulse) &&
3959                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3960                         /* someone lost a heartbeat... */
3961                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3962                                   drv_pulse, mcp_pulse);
3963                 }
3964         }
3965
3966         if ((bp->state == BNX2X_STATE_OPEN) ||
3967             (bp->state == BNX2X_STATE_DISABLED))
3968                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3969
3970 timer_restart:
3971         mod_timer(&bp->timer, jiffies + bp->current_interval);
3972 }
3973
3974 /* end of Statistics */
3975
3976 /* nic init */
3977
3978 /*
3979  * nic init service functions
3980  */
3981
3982 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3983 {
3984         int port = BP_PORT(bp);
3985
3986         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3987                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3988                         sizeof(struct ustorm_status_block)/4);
3989         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3990                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3991                         sizeof(struct cstorm_status_block)/4);
3992 }
3993
3994 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
3995                           dma_addr_t mapping, int sb_id)
3996 {
3997         int port = BP_PORT(bp);
3998         int func = BP_FUNC(bp);
3999         int index;
4000         u64 section;
4001
4002         /* USTORM */
4003         section = ((u64)mapping) + offsetof(struct host_status_block,
4004                                             u_status_block);
4005         sb->u_status_block.status_block_id = sb_id;
4006
4007         REG_WR(bp, BAR_USTRORM_INTMEM +
4008                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4009         REG_WR(bp, BAR_USTRORM_INTMEM +
4010                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4011                U64_HI(section));
4012         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4013                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4014
4015         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4016                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4017                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4018
4019         /* CSTORM */
4020         section = ((u64)mapping) + offsetof(struct host_status_block,
4021                                             c_status_block);
4022         sb->c_status_block.status_block_id = sb_id;
4023
4024         REG_WR(bp, BAR_CSTRORM_INTMEM +
4025                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4026         REG_WR(bp, BAR_CSTRORM_INTMEM +
4027                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4028                U64_HI(section));
4029         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4030                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4031
4032         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4033                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4034                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4035
4036         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4037 }
4038
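/* Clear the default status block sections of all four storms */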
4039 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4040 {
4041         int func = BP_FUNC(bp);
4042
4043         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4044                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4045                         sizeof(struct ustorm_def_status_block)/4);
4046         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4047                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4048                         sizeof(struct cstorm_def_status_block)/4);
4049         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4050                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4051                         sizeof(struct xstorm_def_status_block)/4);
4052         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4053                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4054                         sizeof(struct tstorm_def_status_block)/4);
4055 }
4056
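/* Set up the default status block: latch the attention signal groups,
 * point the HC at the attention section, then write the U/C/T/X storm
 * section addresses, bind them to this function and disable their
 * host-coalescing indices before enabling the IGU interrupt line
 */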
4057 static void bnx2x_init_def_sb(struct bnx2x *bp,
4058                               struct host_def_status_block *def_sb,
4059                               dma_addr_t mapping, int sb_id)
4060 {
4061         int port = BP_PORT(bp);
4062         int func = BP_FUNC(bp);
4063         int index, val, reg_offset;
4064         u64 section;
4065
4066         /* ATTN */
4067         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4068                                             atten_status_block);
4069         def_sb->atten_status_block.status_block_id = sb_id;
4070
4071         bp->attn_state = 0;
4072
4073         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4074                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4075
4076         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4077                 bp->attn_group[index].sig[0] = REG_RD(bp,
4078                                                      reg_offset + 0x10*index);
4079                 bp->attn_group[index].sig[1] = REG_RD(bp,
4080                                                reg_offset + 0x4 + 0x10*index);
4081                 bp->attn_group[index].sig[2] = REG_RD(bp,
4082                                                reg_offset + 0x8 + 0x10*index);
4083                 bp->attn_group[index].sig[3] = REG_RD(bp,
4084                                                reg_offset + 0xc + 0x10*index);
4085         }
4086
4087         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4088                              HC_REG_ATTN_MSG0_ADDR_L);
4089
4090         REG_WR(bp, reg_offset, U64_LO(section));
4091         REG_WR(bp, reg_offset + 4, U64_HI(section));
4092
4093         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4094
4095         val = REG_RD(bp, reg_offset);
4096         val |= sb_id;
4097         REG_WR(bp, reg_offset, val);
4098
4099         /* USTORM */
4100         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4101                                             u_def_status_block);
4102         def_sb->u_def_status_block.status_block_id = sb_id;
4103
4104         REG_WR(bp, BAR_USTRORM_INTMEM +
4105                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4106         REG_WR(bp, BAR_USTRORM_INTMEM +
4107                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4108                U64_HI(section));
4109         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4110                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4111
4112         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4113                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4114                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4115
4116         /* CSTORM */
4117         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4118                                             c_def_status_block);
4119         def_sb->c_def_status_block.status_block_id = sb_id;
4120
4121         REG_WR(bp, BAR_CSTRORM_INTMEM +
4122                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4123         REG_WR(bp, BAR_CSTRORM_INTMEM +
4124                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4125                U64_HI(section));
4126         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4127                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4128
4129         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4130                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4131                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4132
4133         /* TSTORM */
4134         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4135                                             t_def_status_block);
4136         def_sb->t_def_status_block.status_block_id = sb_id;
4137
4138         REG_WR(bp, BAR_TSTRORM_INTMEM +
4139                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4140         REG_WR(bp, BAR_TSTRORM_INTMEM +
4141                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4142                U64_HI(section));
4143         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4144                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4145
4146         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4147                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4148                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4149
4150         /* XSTORM */
4151         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4152                                             x_def_status_block);
4153         def_sb->x_def_status_block.status_block_id = sb_id;
4154
4155         REG_WR(bp, BAR_XSTRORM_INTMEM +
4156                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4157         REG_WR(bp, BAR_XSTRORM_INTMEM +
4158                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4159                U64_HI(section));
4160         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4161                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4162
4163         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4164                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4165                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4166
4167         bp->stats_pending = 0;
4168         bp->set_mac_pending = 0;
4169
4170         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4171 }
4172
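/* Program the host-coalescing timeouts for the Rx and Tx completion
 * indices of each queue; the timeout fields apparently count in
 * 12-usec hardware ticks (hence the division by 12), and a tick
 * value of 0 disables coalescing on that index altogether
 */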
4173 static void bnx2x_update_coalesce(struct bnx2x *bp)
4174 {
4175         int port = BP_PORT(bp);
4176         int i;
4177
4178         for_each_queue(bp, i) {
4179                 int sb_id = bp->fp[i].sb_id;
4180
4181                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4182                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4183                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4184                                                     U_SB_ETH_RX_CQ_INDEX),
4185                         bp->rx_ticks/12);
4186                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4187                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4188                                                      U_SB_ETH_RX_CQ_INDEX),
4189                          bp->rx_ticks ? 0 : 1);
4190                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4191                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4192                                                      U_SB_ETH_RX_BD_INDEX),
4193                          bp->rx_ticks ? 0 : 1);
4194
4195                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4196                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4197                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4198                                                     C_SB_ETH_TX_CQ_INDEX),
4199                         bp->tx_ticks/12);
4200                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4201                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4202                                                      C_SB_ETH_TX_CQ_INDEX),
4203                          bp->tx_ticks ? 0 : 1);
4204         }
4205 }
4206
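/* Release the skbs held in TPA bins [0, last); only bins still in the
 * START state hold a DMA mapping that must be unmapped first
 */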
4207 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4208                                        struct bnx2x_fastpath *fp, int last)
4209 {
4210         int i;
4211
4212         for (i = 0; i < last; i++) {
4213                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4214                 struct sk_buff *skb = rx_buf->skb;
4215
4216                 if (skb == NULL) {
4217                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4218                         continue;
4219                 }
4220
4221                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4222                         pci_unmap_single(bp->pdev,
4223                                          pci_unmap_addr(rx_buf, mapping),
4224                                          bp->rx_buf_use_size,
4225                                          PCI_DMA_FROMDEVICE);
4226
4227                 dev_kfree_skb(skb);
4228                 rx_buf->skb = NULL;
4229         }
4230 }
4231
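/* Build the Rx rings: compute the buffer sizes from the MTU,
 * pre-allocate the TPA aggregation pool (falling back to plain Rx on
 * failure), chain the "next page" elements of the SGE/BD/CQE rings,
 * and fill the rings with freshly mapped buffers
 */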
4232 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4233 {
4234         int func = BP_FUNC(bp);
4235         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4236                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4237         u16 ring_prod, cqe_ring_prod;
4238         int i, j;
4239
4240         bp->rx_buf_use_size = bp->dev->mtu;
4241         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4242         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4243
4244         if (bp->flags & TPA_ENABLE_FLAG) {
4245                 DP(NETIF_MSG_IFUP,
4246                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4247                    bp->rx_buf_use_size, bp->rx_buf_size,
4248                    bp->dev->mtu + ETH_OVREHEAD);
4249
4250                 for_each_queue(bp, j) {
4251                         struct bnx2x_fastpath *fp = &bp->fp[j];
4252
4253                         for (i = 0; i < max_agg_queues; i++) {
4254                                 fp->tpa_pool[i].skb =
4255                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4256                                 if (!fp->tpa_pool[i].skb) {
4257                                         BNX2X_ERR("Failed to allocate TPA "
4258                                                   "skb pool for queue[%d] - "
4259                                                   "disabling TPA on this "
4260                                                   "queue!\n", j);
4261                                         bnx2x_free_tpa_pool(bp, fp, i);
4262                                         fp->disable_tpa = 1;
4263                                         break;
4264                                 }
4265                                 /* use this queue's pool, not fp[0]'s */
4266                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4267                                                    mapping, 0);
4268                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4269                         }
4270                 }
4271         }
4272
4273         for_each_queue(bp, j) {
4274                 struct bnx2x_fastpath *fp = &bp->fp[j];
4275
4276                 fp->rx_bd_cons = 0;
4277                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4278                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4279
4280                 /* "next page" elements initialization */
4281                 /* SGE ring */
4282                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4283                         struct eth_rx_sge *sge;
4284
4285                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4286                         sge->addr_hi =
4287                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4288                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4289                         sge->addr_lo =
4290                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4291                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4292                 }
4293
4294                 bnx2x_init_sge_ring_bit_mask(fp);
4295
4296                 /* RX BD ring */
4297                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4298                         struct eth_rx_bd *rx_bd;
4299
4300                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4301                         rx_bd->addr_hi =
4302                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4303                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4304                         rx_bd->addr_lo =
4305                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4306                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4307                 }
4308
4309                 /* CQ ring */
4310                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4311                         struct eth_rx_cqe_next_page *nextpg;
4312
4313                         nextpg = (struct eth_rx_cqe_next_page *)
4314                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4315                         nextpg->addr_hi =
4316                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4317                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4318                         nextpg->addr_lo =
4319                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4320                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4321                 }
4322
4323                 /* Allocate SGEs and initialize the ring elements */
4324                 for (i = 0, ring_prod = 0;
4325                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4326
4327                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4328                                 BNX2X_ERR("was only able to allocate "
4329                                           "%d rx sges\n", i);
4330                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4331                                 /* Cleanup already allocated elements */
4332                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4333                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4334                                 fp->disable_tpa = 1;
4335                                 ring_prod = 0;
4336                                 break;
4337                         }
4338                         ring_prod = NEXT_SGE_IDX(ring_prod);
4339                 }
4340                 fp->rx_sge_prod = ring_prod;
4341
4342                 /* Allocate BDs and initialize BD ring */
4343                 fp->rx_comp_cons = 0;
4344                 cqe_ring_prod = ring_prod = 0;
4345                 for (i = 0; i < bp->rx_ring_size; i++) {
4346                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4347                                 BNX2X_ERR("was only able to allocate "
4348                                           "%d rx skbs\n", i);
4349                                 bp->eth_stats.rx_skb_alloc_failed++;
4350                                 break;
4351                         }
4352                         ring_prod = NEXT_RX_IDX(ring_prod);
4353                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4354                         WARN_ON(ring_prod <= i);
4355                 }
4356
4357                 fp->rx_bd_prod = ring_prod;
4358                 /* must not have more available CQEs than BDs */
4359                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4360                                        cqe_ring_prod);
4361                 fp->rx_pkt = fp->rx_calls = 0;
4362
4363                 /* Warning!
4364                  * this will generate an interrupt (to the TSTORM),
4365                  * so it must only be done after the chip is initialized
4366                  */
4367                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4368                                      fp->rx_sge_prod);
4369                 if (j != 0)
4370                         continue;
4371
4372                 REG_WR(bp, BAR_USTRORM_INTMEM +
4373                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4374                        U64_LO(fp->rx_comp_mapping));
4375                 REG_WR(bp, BAR_USTRORM_INTMEM +
4376                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4377                        U64_HI(fp->rx_comp_mapping));
4378         }
4379 }
4380
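/* Chain the Tx BD pages into a ring via their last ("next page")
 * descriptors and reset the producer/consumer indices of each queue
 */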
4381 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4382 {
4383         int i, j;
4384
4385         for_each_queue(bp, j) {
4386                 struct bnx2x_fastpath *fp = &bp->fp[j];
4387
4388                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4389                         struct eth_tx_bd *tx_bd =
4390                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4391
4392                         tx_bd->addr_hi =
4393                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4394                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4395                         tx_bd->addr_lo =
4396                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4397                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4398                 }
4399
4400                 fp->tx_pkt_prod = 0;
4401                 fp->tx_pkt_cons = 0;
4402                 fp->tx_bd_prod = 0;
4403                 fp->tx_bd_cons = 0;
4404                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4405                 fp->tx_pkt = 0;
4406         }
4407 }
4408
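/* Initialize the slow-path (SPQ) ring and publish its base address
 * and initial producer index to the XSTORM
 */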
4409 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4410 {
4411         int func = BP_FUNC(bp);
4412
4413         spin_lock_init(&bp->spq_lock);
4414
4415         bp->spq_left = MAX_SPQ_PENDING;
4416         bp->spq_prod_idx = 0;
4417         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4418         bp->spq_prod_bd = bp->spq;
4419         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4420
4421         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4422                U64_LO(bp->spq_mapping));
4423         REG_WR(bp,
4424                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4425                U64_HI(bp->spq_mapping));
4426
4427         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4428                bp->spq_prod_idx);
4429 }
4430
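/* Fill the per-queue Ethernet context: Tx BD ring base and doorbell
 * address for the XSTORM, Rx BD/SGE ring configuration for the USTORM
 * (the SGE ring only when TPA is active on the queue), the Tx
 * completion index for the CSTORM, and the CDU validation words
 */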
4431 static void bnx2x_init_context(struct bnx2x *bp)
4432 {
4433         int i;
4434
4435         for_each_queue(bp, i) {
4436                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4437                 struct bnx2x_fastpath *fp = &bp->fp[i];
4438                 u8 sb_id = FP_SB_ID(fp);
4439
4440                 context->xstorm_st_context.tx_bd_page_base_hi =
4441                                                 U64_HI(fp->tx_desc_mapping);
4442                 context->xstorm_st_context.tx_bd_page_base_lo =
4443                                                 U64_LO(fp->tx_desc_mapping);
4444                 context->xstorm_st_context.db_data_addr_hi =
4445                                                 U64_HI(fp->tx_prods_mapping);
4446                 context->xstorm_st_context.db_data_addr_lo =
4447                                                 U64_LO(fp->tx_prods_mapping);
4448                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4449                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4450
4451                 context->ustorm_st_context.common.sb_index_numbers =
4452                                                 BNX2X_RX_SB_INDEX_NUM;
4453                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4454                 context->ustorm_st_context.common.status_block_id = sb_id;
4455                 context->ustorm_st_context.common.flags =
4456                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4457                 context->ustorm_st_context.common.mc_alignment_size = 64;
4458                 context->ustorm_st_context.common.bd_buff_size =
4459                                                 bp->rx_buf_use_size;
4460                 context->ustorm_st_context.common.bd_page_base_hi =
4461                                                 U64_HI(fp->rx_desc_mapping);
4462                 context->ustorm_st_context.common.bd_page_base_lo =
4463                                                 U64_LO(fp->rx_desc_mapping);
4464                 if (!fp->disable_tpa) {
4465                         context->ustorm_st_context.common.flags |=
4466                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4467                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4468                         context->ustorm_st_context.common.sge_buff_size =
4469                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4470                         context->ustorm_st_context.common.sge_page_base_hi =
4471                                                 U64_HI(fp->rx_sge_mapping);
4472                         context->ustorm_st_context.common.sge_page_base_lo =
4473                                                 U64_LO(fp->rx_sge_mapping);
4474                 }
4475
4476                 context->cstorm_st_context.sb_index_number =
4477                                                 C_SB_ETH_TX_CQ_INDEX;
4478                 context->cstorm_st_context.status_block_id = sb_id;
4479
4480                 context->xstorm_ag_context.cdu_reserved =
4481                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4482                                                CDU_REGION_NUMBER_XCM_AG,
4483                                                ETH_CONNECTION_TYPE);
4484                 context->ustorm_ag_context.cdu_usage =
4485                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4486                                                CDU_REGION_NUMBER_UCM_AG,
4487                                                ETH_CONNECTION_TYPE);
4488         }
4489 }
4490
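/* Program the RSS indirection table, spreading the table entries
 * round-robin over the active queues (multi-queue mode only)
 */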
4491 static void bnx2x_init_ind_table(struct bnx2x *bp)
4492 {
4493         int port = BP_PORT(bp);
4494         int i;
4495
4496         if (!is_multi(bp))
4497                 return;
4498
4499         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4500         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4501                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4502                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4503                         i % bp->num_queues);
4504
4505         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4506 }
4507
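/* Write the per-client TSTORM configuration: effective MTU, statistics
 * counter id, optional VLAN stripping and, when TPA is enabled, the
 * maximal number of SGEs a single packet may span
 */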
4508 static void bnx2x_set_client_config(struct bnx2x *bp)
4509 {
4510         struct tstorm_eth_client_config tstorm_client = {0};
4511         int port = BP_PORT(bp);
4512         int i;
4513
4514         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4515         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4516         tstorm_client.config_flags =
4517                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4518 #ifdef BCM_VLAN
4519         if (bp->rx_mode && bp->vlgrp) {
4520                 tstorm_client.config_flags |=
4521                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4522                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4523         }
4524 #endif
4525
4526         if (bp->flags & TPA_ENABLE_FLAG) {
4527                 tstorm_client.max_sges_for_packet =
4528                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4529                 tstorm_client.max_sges_for_packet =
4530                         ((tstorm_client.max_sges_for_packet +
4531                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4532                         PAGES_PER_SGE_SHIFT;
4533
4534                 tstorm_client.config_flags |=
4535                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4536         }
4537
4538         for_each_queue(bp, i) {
4539                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4540                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4541                        ((u32 *)&tstorm_client)[0]);
4542                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4543                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4544                        ((u32 *)&tstorm_client)[1]);
4545         }
4546
4547         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4548            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4549 }
4550
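/* Translate the requested rx_mode into per-client drop-all/accept-all
 * masks for the TSTORM MAC filter, and push the client configuration
 * unless Rx is being turned off completely
 */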
4551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4552 {
4553         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4554         int mode = bp->rx_mode;
4555         int mask = (1 << BP_L_ID(bp));
4556         int func = BP_FUNC(bp);
4557         int i;
4558
4559         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4560
4561         switch (mode) {
4562         case BNX2X_RX_MODE_NONE: /* no Rx */
4563                 tstorm_mac_filter.ucast_drop_all = mask;
4564                 tstorm_mac_filter.mcast_drop_all = mask;
4565                 tstorm_mac_filter.bcast_drop_all = mask;
4566                 break;
4567         case BNX2X_RX_MODE_NORMAL:
4568                 tstorm_mac_filter.bcast_accept_all = mask;
4569                 break;
4570         case BNX2X_RX_MODE_ALLMULTI:
4571                 tstorm_mac_filter.mcast_accept_all = mask;
4572                 tstorm_mac_filter.bcast_accept_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_PROMISC:
4575                 tstorm_mac_filter.ucast_accept_all = mask;
4576                 tstorm_mac_filter.mcast_accept_all = mask;
4577                 tstorm_mac_filter.bcast_accept_all = mask;
4578                 break;
4579         default:
4580                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4581                 break;
4582         }
4583
4584         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4585                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4587                        ((u32 *)&tstorm_mac_filter)[i]);
4588
4589 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4590                    ((u32 *)&tstorm_mac_filter)[i]); */
4591         }
4592
4593         if (mode != BNX2X_RX_MODE_NONE)
4594                 bnx2x_set_client_config(bp);
4595 }
4596
4597 static void bnx2x_init_internal_common(struct bnx2x *bp)
4598 {
4599         int i;
4600
4601         /* Zero this manually as its initialization is
4602            currently missing in the initTool */
4603         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4606 }
4607
4608 static void bnx2x_init_internal_port(struct bnx2x *bp)
4609 {
4610         int port = BP_PORT(bp);
4611
4612         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4613         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4614         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4615         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4616 }
4617
4618 static void bnx2x_init_internal_func(struct bnx2x *bp)
4619 {
4620         struct tstorm_eth_function_common_config tstorm_config = {0};
4621         struct stats_indication_flags stats_flags = {0};
4622         int port = BP_PORT(bp);
4623         int func = BP_FUNC(bp);
4624         int i;
4625         u16 max_agg_size;
4626
4627         if (is_multi(bp)) {
4628                 tstorm_config.config_flags = MULTI_FLAGS;
4629                 tstorm_config.rss_result_mask = MULTI_MASK;
4630         }
4631
4632         tstorm_config.leading_client_id = BP_L_ID(bp);
4633
4634         REG_WR(bp, BAR_TSTRORM_INTMEM +
4635                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4636                (*(u32 *)&tstorm_config));
4637
4638         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4639         bnx2x_set_storm_rx_mode(bp);
4640
4641         /* reset xstorm per client statistics */
4642         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4643                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4644                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4645                        i*4, 0);
4646         }
4647         /* reset tstorm per client statistics */
4648         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4649                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4650                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4651                        i*4, 0);
4652         }
4653
4654         /* Init statistics related context */
4655         stats_flags.collect_eth = 1;
4656
4657         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4658                ((u32 *)&stats_flags)[0]);
4659         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660                ((u32 *)&stats_flags)[1]);
4661
4662         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4663                ((u32 *)&stats_flags)[0]);
4664         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4665                ((u32 *)&stats_flags)[1]);
4666
4667         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4668                ((u32 *)&stats_flags)[0]);
4669         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4670                ((u32 *)&stats_flags)[1]);
4671
4672         REG_WR(bp, BAR_XSTRORM_INTMEM +
4673                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4674                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4675         REG_WR(bp, BAR_XSTRORM_INTMEM +
4676                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4677                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4678
4679         REG_WR(bp, BAR_TSTRORM_INTMEM +
4680                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682         REG_WR(bp, BAR_TSTRORM_INTMEM +
4683                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686         if (CHIP_IS_E1H(bp)) {
4687                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4688                         IS_E1HMF(bp));
4689                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4690                         IS_E1HMF(bp));
4691                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4692                         IS_E1HMF(bp));
4693                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4694                         IS_E1HMF(bp));
4695
4696                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4697                          bp->e1hov);
4698         }
4699
4700         /* Init CQ ring mapping and aggregation size */
4701         max_agg_size = min((u32)(bp->rx_buf_use_size +
4702                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4703                            (u32)0xffff);
4704         for_each_queue(bp, i) {
4705                 struct bnx2x_fastpath *fp = &bp->fp[i];
4706
4707                 REG_WR(bp, BAR_USTRORM_INTMEM +
4708                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4709                        U64_LO(fp->rx_comp_mapping));
4710                 REG_WR(bp, BAR_USTRORM_INTMEM +
4711                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4712                        U64_HI(fp->rx_comp_mapping));
4713
4714                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4715                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4716                          max_agg_size);
4717         }
4718 }
4719
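/* The switch below deliberately falls through: a COMMON load also
 * performs the PORT and FUNCTION stages, and a PORT load also
 * performs the FUNCTION stage
 */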
4720 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4721 {
4722         switch (load_code) {
4723         case FW_MSG_CODE_DRV_LOAD_COMMON:
4724                 bnx2x_init_internal_common(bp);
4725                 /* no break */
4726
4727         case FW_MSG_CODE_DRV_LOAD_PORT:
4728                 bnx2x_init_internal_port(bp);
4729                 /* no break */
4730
4731         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4732                 bnx2x_init_internal_func(bp);
4733                 break;
4734
4735         default:
4736                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4737                 break;
4738         }
4739 }
4740
4741 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4742 {
4743         int i;
4744
4745         for_each_queue(bp, i) {
4746                 struct bnx2x_fastpath *fp = &bp->fp[i];
4747
4748                 fp->bp = bp;
4749                 fp->state = BNX2X_FP_STATE_CLOSED;
4750                 fp->index = i;
4751                 fp->cl_id = BP_L_ID(bp) + i;
4752                 fp->sb_id = fp->cl_id;
4753                 DP(NETIF_MSG_IFUP,
4754                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4755                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4756                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4757                               FP_SB_ID(fp));
4758                 bnx2x_update_fpsb_idx(fp);
4759         }
4760
4761         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4762                           DEF_SB_ID);
4763         bnx2x_update_dsb_idx(bp);
4764         bnx2x_update_coalesce(bp);
4765         bnx2x_init_rx_rings(bp);
4766         bnx2x_init_tx_ring(bp);
4767         bnx2x_init_sp_ring(bp);
4768         bnx2x_init_context(bp);
4769         bnx2x_init_internal(bp, load_code);
4770         bnx2x_init_ind_table(bp);
4771         bnx2x_int_enable(bp);
4772 }
4773
4774 /* end of nic init */
4775
4776 /*
4777  * gzip service functions
4778  */
4779
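/* Allocate the DMA-able buffer that receives the decompressed firmware
 * and the zlib stream plus inflate workspace used to produce it
 */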
4780 static int bnx2x_gunzip_init(struct bnx2x *bp)
4781 {
4782         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4783                                               &bp->gunzip_mapping);
4784         if (bp->gunzip_buf  == NULL)
4785                 goto gunzip_nomem1;
4786
4787         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4788         if (bp->strm  == NULL)
4789                 goto gunzip_nomem2;
4790
4791         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4792                                       GFP_KERNEL);
4793         if (bp->strm->workspace == NULL)
4794                 goto gunzip_nomem3;
4795
4796         return 0;
4797
4798 gunzip_nomem3:
4799         kfree(bp->strm);
4800         bp->strm = NULL;
4801
4802 gunzip_nomem2:
4803         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4804                             bp->gunzip_mapping);
4805         bp->gunzip_buf = NULL;
4806
4807 gunzip_nomem1:
4808         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4809                " decompression\n", bp->dev->name);
4810         return -ENOMEM;
4811 }
4812
4813 static void bnx2x_gunzip_end(struct bnx2x *bp)
4814 {
4815         kfree(bp->strm->workspace);
4816
4817         kfree(bp->strm);
4818         bp->strm = NULL;
4819
4820         if (bp->gunzip_buf) {
4821                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4822                                     bp->gunzip_mapping);
4823                 bp->gunzip_buf = NULL;
4824         }
4825 }
4826
4827 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4828 {
4829         int n, rc;
4830
4831         /* check gzip header */
4832         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4833                 return -EINVAL;
4834
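        /* the fixed part of a gzip member header is 10 bytes; when the
         * FNAME flag is set it is followed by a NUL-terminated original
         * file name, which is skipped below
         */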
4835         n = 10;
4836
4837 #define FNAME                           0x8
4838
4839         if (zbuf[3] & FNAME)
4840                 while ((zbuf[n++] != 0) && (n < len));
4841
4842         bp->strm->next_in = zbuf + n;
4843         bp->strm->avail_in = len - n;
4844         bp->strm->next_out = bp->gunzip_buf;
4845         bp->strm->avail_out = FW_BUF_SIZE;
4846
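        /* negative windowBits tells zlib to expect a raw deflate
         * stream, since the gzip wrapper was stripped above
         */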
4847         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4848         if (rc != Z_OK)
4849                 return rc;
4850
4851         rc = zlib_inflate(bp->strm, Z_FINISH);
4852         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4853                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4854                        bp->dev->name, bp->strm->msg);
4855
4856         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4857         if (bp->gunzip_outlen & 0x3)
4858                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4859                                     " gunzip_outlen (%d) not aligned\n",
4860                        bp->dev->name, bp->gunzip_outlen);
4861         bp->gunzip_outlen >>= 2;
4862
4863         zlib_inflateEnd(bp->strm);
4864
4865         if (rc == Z_STREAM_END)
4866                 return 0;
4867
4868         return rc;
4869 }
4870
4871 /* nic load/unload */
4872
4873 /*
4874  * General service functions
4875  */
4876
4877 /* send a NIG loopback debug packet */
4878 static void bnx2x_lb_pckt(struct bnx2x *bp)
4879 {
4880         u32 wb_write[3];
4881
4882         /* Ethernet source and destination addresses */
4883         wb_write[0] = 0x55555555;
4884         wb_write[1] = 0x55555555;
4885         wb_write[2] = 0x20;             /* SOP */
4886         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4887
4888         /* NON-IP protocol */
4889         wb_write[0] = 0x09000000;
4890         wb_write[1] = 0x55555555;
4891         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4892         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4893 }
4894
4895 /* some of the internal memories
4896  * are not directly readable from the driver;
4897  * to test them, we send debug packets through them
4898  */
4899 static int bnx2x_int_mem_test(struct bnx2x *bp)
4900 {
4901         int factor;
4902         int count, i;
4903         u32 val = 0;
4904
4905         if (CHIP_REV_IS_FPGA(bp))
4906                 factor = 120;
4907         else if (CHIP_REV_IS_EMUL(bp))
4908                 factor = 200;
4909         else
4910                 factor = 1;
4911
4912         DP(NETIF_MSG_HW, "start part1\n");
4913
4914         /* Disable inputs of parser neighbor blocks */
4915         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4916         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4917         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4918         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4919
4920         /* Write 0 to parser credits for CFC search request */
4921         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4922
4923         /* send Ethernet packet */
4924         bnx2x_lb_pckt(bp);
4925
4926         /* TODO: should the NIG statistics be reset here? */
4927         /* Wait until NIG register shows 1 packet of size 0x10 */
4928         count = 1000 * factor;
4929         while (count) {
4930
4931                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4932                 val = *bnx2x_sp(bp, wb_data[0]);
4933                 if (val == 0x10)
4934                         break;
4935
4936                 msleep(10);
4937                 count--;
4938         }
4939         if (val != 0x10) {
4940                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4941                 return -1;
4942         }
4943
4944         /* Wait until PRS register shows 1 packet */
4945         count = 1000 * factor;
4946         while (count) {
4947                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4948                 if (val == 1)
4949                         break;
4950
4951                 msleep(10);
4952                 count--;
4953         }
4954         if (val != 0x1) {
4955                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4956                 return -2;
4957         }
4958
4959         /* Reset and init BRB, PRS */
4960         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4961         msleep(50);
4962         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4963         msleep(50);
4964         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4965         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4966
4967         DP(NETIF_MSG_HW, "part2\n");
4968
4969         /* Disable inputs of parser neighbor blocks */
4970         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4971         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4972         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4973         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4974
4975         /* Write 0 to parser credits for CFC search request */
4976         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4977
4978         /* send 10 Ethernet packets */
4979         for (i = 0; i < 10; i++)
4980                 bnx2x_lb_pckt(bp);
4981
4982         /* Wait until NIG register shows 10 + 1
4983            packets of size 11*0x10 = 0xb0 */
4984         count = 1000 * factor;
4985         while (count) {
4986
4987                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4988                 val = *bnx2x_sp(bp, wb_data[0]);
4989                 if (val == 0xb0)
4990                         break;
4991
4992                 msleep(10);
4993                 count--;
4994         }
4995         if (val != 0xb0) {
4996                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4997                 return -3;
4998         }
4999
5000         /* Wait until PRS register shows 2 packets */
5001         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5002         if (val != 2)
5003                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5004
5005         /* Write 1 to parser credits for CFC search request */
5006         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5007
5008         /* Wait until PRS register shows 3 packets */
5009         msleep(10 * factor);
5010         /* and check that the PRS packet counter has reached 3 */
5011         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012         if (val != 3)
5013                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5014
5015         /* clear NIG EOP FIFO */
5016         for (i = 0; i < 11; i++)
5017                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5018         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5019         if (val != 1) {
5020                 BNX2X_ERR("clear of NIG failed\n");
5021                 return -4;
5022         }
5023
5024         /* Reset and init BRB, PRS, NIG */
5025         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5026         msleep(50);
5027         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5028         msleep(50);
5029         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5030         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5031 #ifndef BCM_ISCSI
5032         /* set NIC mode */
5033         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5034 #endif
5035
5036         /* Enable inputs of parser neighbor blocks */
5037         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5038         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5039         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5040         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5041
5042         DP(NETIF_MSG_HW, "done\n");
5043
5044         return 0; /* OK */
5045 }
5046
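/* Unmask the attention interrupts of most HW blocks (writing 0 to an
 * INT_MASK register apparently unmasks all of its bits); the
 * commented-out SEM/MISC masks are left untouched and PBF keeps
 * bits 3 and 4 masked
 */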
5047 static void enable_blocks_attention(struct bnx2x *bp)
5048 {
5049         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5050         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5051         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5052         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5053         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5054         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5055         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5056         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5057         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5058 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5059 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5060         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5061         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5062         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5063 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5064 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5065         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5066         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5067         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5068         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5069 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5070 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5071         if (CHIP_REV_IS_FPGA(bp))
5072                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5073         else
5074                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5075         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5076         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5077         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5078 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5079 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5080         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5081         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5082 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5083         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5084 }
5085
5086
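/* First stage of HW init, done once per chip: take all blocks out of
 * reset, initialize them in pipeline order, self-test the internal
 * memories on an E1 first power-up, and enable the block attentions
 */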
5087 static int bnx2x_init_common(struct bnx2x *bp)
5088 {
5089         u32 val, i;
5090
5091         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5092
5093         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5094         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5095
5096         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5097         if (CHIP_IS_E1H(bp))
5098                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5099
5100         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5101         msleep(30);
5102         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5103
5104         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5105         if (CHIP_IS_E1(bp)) {
5106                 /* enable HW interrupt from PXP on USDM overflow
5107                    bit 16 on INT_MASK_0 */
5108                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5109         }
5110
5111         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5112         bnx2x_init_pxp(bp);
5113
5114 #ifdef __BIG_ENDIAN
5115         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5116         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5117         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5118         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5119         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5120         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5121
5122 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5123         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5124         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5125         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5126         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5127 #endif
5128
5129 #ifndef BCM_ISCSI
5130                 /* set NIC mode */
5131                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5132 #endif
5133
5134         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5135 #ifdef BCM_ISCSI
5136         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5137         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5138         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5139 #endif
5140
5141         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5142                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5143
5144         /* let the HW do its magic ... */
5145         msleep(100);
5146         /* finish PXP init */
5147         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5148         if (val != 1) {
5149                 BNX2X_ERR("PXP2 CFG failed\n");
5150                 return -EBUSY;
5151         }
5152         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5153         if (val != 1) {
5154                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5155                 return -EBUSY;
5156         }
5157
5158         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5159         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5160
5161         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5162
5163         /* clean the DMAE memory */
5164         bp->dmae_ready = 1;
5165         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5166
5167         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5168         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5169         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5170         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5171
5172         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5173         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5174         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5175         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5176
5177         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5178         /* soft reset pulse */
5179         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5180         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5181
5182 #ifdef BCM_ISCSI
5183         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5184 #endif
5185
5186         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5187         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5188         if (!CHIP_REV_IS_SLOW(bp)) {
5189                 /* enable hw interrupt from doorbell Q */
5190                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5191         }
5192
5193         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5194         if (CHIP_REV_IS_SLOW(bp)) {
5195                 /* fix for emulation and FPGA: effectively disable pause */
5196                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5197                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5198                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5199                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5200         }
5201
5202         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5203         if (CHIP_IS_E1H(bp))
5204                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5205
5206         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5207         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5208         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5209         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5210
5211         if (CHIP_IS_E1H(bp)) {
5212                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5213                                 STORM_INTMEM_SIZE_E1H/2);
5214                 bnx2x_init_fill(bp,
5215                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5216                                 0, STORM_INTMEM_SIZE_E1H/2);
5217                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5218                                 STORM_INTMEM_SIZE_E1H/2);
5219                 bnx2x_init_fill(bp,
5220                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221                                 0, STORM_INTMEM_SIZE_E1H/2);
5222                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5223                                 STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp,
5225                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226                                 0, STORM_INTMEM_SIZE_E1H/2);
5227                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5228                                 STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp,
5230                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231                                 0, STORM_INTMEM_SIZE_E1H/2);
5232         } else { /* E1 */
5233                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5234                                 STORM_INTMEM_SIZE_E1);
5235                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5236                                 STORM_INTMEM_SIZE_E1);
5237                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5238                                 STORM_INTMEM_SIZE_E1);
5239                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5240                                 STORM_INTMEM_SIZE_E1);
5241         }
5242
5243         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5244         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5245         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5246         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5247
5248         /* sync semi rtc */
5249         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5250                0x80000000);
5251         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5252                0x80000000);
5253
5254         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5255         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5256         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5257
5258         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5259         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5260                 REG_WR(bp, i, 0xc0cac01a);
5261                 /* TODO: replace with something meaningful */
5262         }
5263         if (CHIP_IS_E1H(bp))
5264                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5265         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5266
5267         if (sizeof(union cdu_context) != 1024)
5268                 /* we currently assume that a context is 1024 bytes */
5269                 printk(KERN_ALERT PFX "please adjust the size of"
5270                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5271
5272         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5273         val = (4 << 24) + (0 << 12) + 1024;
5274         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5275         if (CHIP_IS_E1(bp)) {
5276                 /* !!! fix PXP client credit until excel update */
5277                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5278                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5279         }
5280
5281         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5282         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5283
5284         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5285         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5286
5287         /* PXPCS COMMON comes here */
5288         /* Reset PCIE errors for debug */
5289         REG_WR(bp, 0x2814, 0xffffffff);
5290         REG_WR(bp, 0x3820, 0xffffffff);
5291
5292         /* EMAC0 COMMON comes here */
5293         /* EMAC1 COMMON comes here */
5294         /* DBU COMMON comes here */
5295         /* DBG COMMON comes here */
5296
5297         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5298         if (CHIP_IS_E1H(bp)) {
5299                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5300                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5301         }
5302
5303         if (CHIP_REV_IS_SLOW(bp))
5304                 msleep(200);
5305
5306         /* finish CFC init */
5307         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5308         if (val != 1) {
5309                 BNX2X_ERR("CFC LL_INIT failed\n");
5310                 return -EBUSY;
5311         }
5312         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5313         if (val != 1) {
5314                 BNX2X_ERR("CFC AC_INIT failed\n");
5315                 return -EBUSY;
5316         }
5317         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5318         if (val != 1) {
5319                 BNX2X_ERR("CFC CAM_INIT failed\n");
5320                 return -EBUSY;
5321         }
5322         REG_WR(bp, CFC_REG_DEBUG0, 0);
5323
5324         /* read NIG statistic
5325            to see if this is our first up since powerup */
5326         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5327         val = *bnx2x_sp(bp, wb_data[0]);
5328
5329         /* do internal memory self test */
5330         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5331                 BNX2X_ERR("internal mem self test failed\n");
5332                 return -EBUSY;
5333         }
5334
5335         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5336         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5337                 /* Fan failure is indicated by SPIO 5 */
5338                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5339                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5340
5341                 /* set to active low mode */
5342                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5343                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5344                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5345                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5346
5347                 /* enable interrupt to signal the IGU */
5348                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5349                 val |= (1 << MISC_REGISTERS_SPIO_5);
5350                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5351                 break;
5352
5353         default:
5354                 break;
5355         }
5356
5357         /* clear PXP2 attentions */
5358         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5359
5360         enable_blocks_attention(bp);
5361
5362         if (bp->flags & TPA_ENABLE_FLAG) {
5363                 struct tstorm_eth_tpa_exist tmp = {0};
5364
5365                 tmp.tpa_exist = 1;
5366
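		/* the 8-byte tstorm_eth_tpa_exist structure is written to
		 * storm internal memory as two 32-bit chunks */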
5367                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5368                        ((u32 *)&tmp)[0]);
5369                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5370                        ((u32 *)&tmp)[1]);
5371         }
5372
5373         return 0;
5374 }
5375
5376 static int bnx2x_init_port(struct bnx2x *bp)
5377 {
5378         int port = BP_PORT(bp);
5379         u32 val;
5380
5381         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5382
5383         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5384
5385         /* Port PXP comes here */
5386         /* Port PXP2 comes here */
5387 #ifdef BCM_ISCSI
5388         /* Port0  1
5389          * Port1  385 */
5390         i++;
5391         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5392         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5393         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5394         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5395
5396         /* Port0  2
5397          * Port1  386 */
5398         i++;
5399         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5400         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5401         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5402         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5403
5404         /* Port0  3
5405          * Port1  387 */
5406         i++;
5407         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5408         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5409         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5410         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5411 #endif
5412         /* Port CMs come here */
5413
5414         /* Port QM comes here */
5415 #ifdef BCM_ISCSI
5416         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5417         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5418
5419         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5420                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5421 #endif
5422         /* Port DQ comes here */
5423         /* Port BRB1 comes here */
5424         /* Port PRS comes here */
5425         /* Port TSDM comes here */
5426         /* Port CSDM comes here */
5427         /* Port USDM comes here */
5428         /* Port XSDM comes here */
5429         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5430                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5431         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5432                              port ? USEM_PORT1_END : USEM_PORT0_END);
5433         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5434                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5435         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5436                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5437         /* Port UPB comes here */
5438         /* Port XPB comes here */
5439
5440         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5441                              port ? PBF_PORT1_END : PBF_PORT0_END);
5442
5443         /* configure PBF to work without PAUSE for MTU 9000 */
5444         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5445
5446         /* update threshold */
5447         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5448         /* update init credit */
5449         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5450
5451         /* probe changes */
5452         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5453         msleep(5);
5454         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5455
5456 #ifdef BCM_ISCSI
5457         /* tell the searcher where the T2 table is */
5458         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5459
5460         wb_write[0] = U64_LO(bp->t2_mapping);
5461         wb_write[1] = U64_HI(bp->t2_mapping);
5462         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5463         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5464         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5465         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5466
5467         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5468         /* Port SRCH comes here */
5469 #endif
5470         /* Port CDU comes here */
5471         /* Port CFC comes here */
5472
5473         if (CHIP_IS_E1(bp)) {
5474                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5475                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5476         }
5477         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5478                              port ? HC_PORT1_END : HC_PORT0_END);
5479
5480         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5481                                     MISC_AEU_PORT0_START,
5482                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5483         /* init aeu_mask_attn_func_0/1:
5484          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5485          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5486          *             bits 4-7 are used for "per vn group attention" */
5487         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5488                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5489
5490         /* Port PXPCS comes here */
5491         /* Port EMAC0 comes here */
5492         /* Port EMAC1 comes here */
5493         /* Port DBU comes here */
5494         /* Port DBG comes here */
5495         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5496                              port ? NIG_PORT1_END : NIG_PORT0_END);
5497
5498         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5499
5500         if (CHIP_IS_E1H(bp)) {
5501                 u32 wsum;
5502                 struct cmng_struct_per_port m_cmng_port;
5503                 int vn;
5504
5505                 /* 0x2 disable e1hov, 0x1 enable */
5506                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5507                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5508
5509                 /* Init RATE SHAPING and FAIRNESS contexts.
5510                    Initialize as if there is a 10G link. */
5511                 wsum = bnx2x_calc_vn_wsum(bp);
5512                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5513                 if (IS_E1HMF(bp))
5514                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5515                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5516                                         wsum, 10000, &m_cmng_port);
5517         }
5518
5519         /* Port MCP comes here */
5520         /* Port DMAE comes here */
5521
5522         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5523         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5524                 /* add SPIO 5 to group 0 */
5525                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5526                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5527                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5528                 break;
5529
5530         default:
5531                 break;
5532         }
5533
5534         bnx2x__link_reset(bp);
5535
5536         return 0;
5537 }
5538
5539 #define ILT_PER_FUNC            (768/2)
5540 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5541 /* the phys address is shifted right 12 bits and a 1=valid bit is
5542    added as the 53rd bit;
5543    then, since this is a wide register(TM),
5544    we split it into two 32-bit writes
5545  */
5546 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
5547 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
5548 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5549 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
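/* Worked example (illustrative only): for a DMA address of
 * 0x0000012345678000, ONCHIP_ADDR1() yields the low 32 bits of the
 * shifted address, (addr >> 12) & 0xFFFFFFFF = 0x12345678, while
 * ONCHIP_ADDR2() yields the valid bit plus the remaining high bits,
 * (1 << 20) | (addr >> 44) = 0x00100000.  PXP_ONE_ILT(5) packs the
 * same ILT line as both first and last, (5 << 10) | 5 = 0x1405; with
 * CNIC_ILT_LINES == 0, PXP_ILT_RANGE(i, i) produces the same value.
 */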
5550
5551 #define CNIC_ILT_LINES          0
5552
5553 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5554 {
5555         int reg;
5556
5557         if (CHIP_IS_E1H(bp))
5558                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5559         else /* E1 */
5560                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5561
5562         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5563 }
5564
5565 static int bnx2x_init_func(struct bnx2x *bp)
5566 {
5567         int port = BP_PORT(bp);
5568         int func = BP_FUNC(bp);
5569         int i;
5570
5571         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5572
5573         i = FUNC_ILT_BASE(func);
5574
5575         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5576         if (CHIP_IS_E1H(bp)) {
5577                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5578                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5579         } else /* E1 */
5580                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5581                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5582
5583
5584         if (CHIP_IS_E1H(bp)) {
5585                 for (i = 0; i < 9; i++)
5586                         bnx2x_init_block(bp,
5587                                          cm_start[func][i], cm_end[func][i]);
5588
5589                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5590                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5591         }
5592
5593         /* HC init per function */
5594         if (CHIP_IS_E1H(bp)) {
5595                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5596
5597                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5598                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5599         }
5600         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5601
5602         if (CHIP_IS_E1H(bp))
5603                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5604
5605         /* Reset PCIE errors for debug */
5606         REG_WR(bp, 0x2114, 0xffffffff);
5607         REG_WR(bp, 0x2120, 0xffffffff);
5608
5609         return 0;
5610 }
5611
5612 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5613 {
5614         int i, rc = 0;
5615
5616         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5617            BP_FUNC(bp), load_code);
5618
5619         bp->dmae_ready = 0;
5620         mutex_init(&bp->dmae_mutex);
5621         bnx2x_gunzip_init(bp);
5622
5623         switch (load_code) {
5624         case FW_MSG_CODE_DRV_LOAD_COMMON:
5625                 rc = bnx2x_init_common(bp);
5626                 if (rc)
5627                         goto init_hw_err;
5628                 /* no break */
5629
5630         case FW_MSG_CODE_DRV_LOAD_PORT:
5631                 bp->dmae_ready = 1;
5632                 rc = bnx2x_init_port(bp);
5633                 if (rc)
5634                         goto init_hw_err;
5635                 /* no break */
5636
5637         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5638                 bp->dmae_ready = 1;
5639                 rc = bnx2x_init_func(bp);
5640                 if (rc)
5641                         goto init_hw_err;
5642                 break;
5643
5644         default:
5645                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5646                 break;
5647         }
5648
5649         if (!BP_NOMCP(bp)) {
5650                 int func = BP_FUNC(bp);
5651
5652                 bp->fw_drv_pulse_wr_seq =
5653                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5654                                  DRV_PULSE_SEQ_MASK);
5655                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5656                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5657                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5658         } else
5659                 bp->func_stx = 0;
5660
5661         /* this needs to be done before gunzip end */
5662         bnx2x_zero_def_sb(bp);
5663         for_each_queue(bp, i)
5664                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5665
5666 init_hw_err:
5667         bnx2x_gunzip_end(bp);
5668
5669         return rc;
5670 }
5671
5672 /* send the MCP a request, block until there is a reply */
5673 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5674 {
5675         int func = BP_FUNC(bp);
5676         u32 seq = ++bp->fw_seq;
5677         u32 rc = 0;
5678         u32 cnt = 1;
5679         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5680
5681         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5682         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5683
5684         do {
5685                 /* let the FW do its magic ... */
5686                 msleep(delay);
5687
5688                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5689
5690                 /* Give the FW up to 2 seconds (200*10ms; 100ms steps on slow chip revs) */
5691         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5692
5693         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5694            cnt*delay, rc, seq);
5695
5696         /* is this a reply to our command? */
5697         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5698                 rc &= FW_MSG_CODE_MASK;
5699
5700         } else {
5701                 /* FW BUG! */
5702                 BNX2X_ERR("FW failed to respond!\n");
5703                 bnx2x_fw_dump(bp);
5704                 rc = 0;
5705         }
5706
5707         return rc;
5708 }
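
/* Illustrative usage (see bnx2x_nic_load() below): callers post a
 * request and treat a return value of 0 as "no valid MCP response":
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code)
 *		return -EBUSY;
 */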
5709
5710 static void bnx2x_free_mem(struct bnx2x *bp)
5711 {
5712
5713 #define BNX2X_PCI_FREE(x, y, size) \
5714         do { \
5715                 if (x) { \
5716                         pci_free_consistent(bp->pdev, size, x, y); \
5717                         x = NULL; \
5718                         y = 0; \
5719                 } \
5720         } while (0)
5721
5722 #define BNX2X_FREE(x) \
5723         do { \
5724                 if (x) { \
5725                         vfree(x); \
5726                         x = NULL; \
5727                 } \
5728         } while (0)
5729
5730         int i;
5731
5732         /* fastpath */
5733         for_each_queue(bp, i) {
5734
5735                 /* Status blocks */
5736                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5737                                bnx2x_fp(bp, i, status_blk_mapping),
5738                                sizeof(struct host_status_block) +
5739                                sizeof(struct eth_tx_db_data));
5740
5741                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5742                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5743                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5744                                bnx2x_fp(bp, i, tx_desc_mapping),
5745                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5746
5747                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5749                                bnx2x_fp(bp, i, rx_desc_mapping),
5750                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5751
5752                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5753                                bnx2x_fp(bp, i, rx_comp_mapping),
5754                                sizeof(struct eth_fast_path_rx_cqe) *
5755                                NUM_RCQ_BD);
5756
5757                 /* SGE ring */
5758                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5760                                bnx2x_fp(bp, i, rx_sge_mapping),
5761                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5762         }
5763         /* end of fastpath */
5764
5765         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5766                        sizeof(struct host_def_status_block));
5767
5768         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5769                        sizeof(struct bnx2x_slowpath));
5770
5771 #ifdef BCM_ISCSI
5772         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5773         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5774         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5775         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5776 #endif
5777         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5778
5779 #undef BNX2X_PCI_FREE
5780 #undef BNX2X_FREE
5781 }
5782
5783 static int bnx2x_alloc_mem(struct bnx2x *bp)
5784 {
5785
5786 #define BNX2X_PCI_ALLOC(x, y, size) \
5787         do { \
5788                 x = pci_alloc_consistent(bp->pdev, size, y); \
5789                 if (x == NULL) \
5790                         goto alloc_mem_err; \
5791                 memset(x, 0, size); \
5792         } while (0)
5793
5794 #define BNX2X_ALLOC(x, size) \
5795         do { \
5796                 x = vmalloc(size); \
5797                 if (x == NULL) \
5798                         goto alloc_mem_err; \
5799                 memset(x, 0, size); \
5800         } while (0)
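
/* Note: on any allocation failure both macros jump to alloc_mem_err,
 * where bnx2x_free_mem() releases whatever was already allocated; the
 * free macros there are no-ops for pointers that are still NULL.
 */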
5801
5802         int i;
5803
5804         /* fastpath */
5805         for_each_queue(bp, i) {
5806                 bnx2x_fp(bp, i, bp) = bp;
5807
5808                 /* Status blocks */
5809                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5810                                 &bnx2x_fp(bp, i, status_blk_mapping),
5811                                 sizeof(struct host_status_block) +
5812                                 sizeof(struct eth_tx_db_data));
5813
5814                 bnx2x_fp(bp, i, hw_tx_prods) =
5815                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5816
5817                 bnx2x_fp(bp, i, tx_prods_mapping) =
5818                                 bnx2x_fp(bp, i, status_blk_mapping) +
5819                                 sizeof(struct host_status_block);
5820
5821                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5822                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5823                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5824                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5825                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5826                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5827
5828                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5829                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5831                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5832                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5833
5834                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5835                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5836                                 sizeof(struct eth_fast_path_rx_cqe) *
5837                                 NUM_RCQ_BD);
5838
5839                 /* SGE ring */
5840                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5841                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5842                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5843                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5844                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5845         }
5846         /* end of fastpath */
5847
5848         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849                         sizeof(struct host_def_status_block));
5850
5851         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852                         sizeof(struct bnx2x_slowpath));
5853
5854 #ifdef BCM_ISCSI
5855         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5856
5857         /* Initialize T1 */
5858         for (i = 0; i < 64*1024; i += 64) {
5859                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5861         }
5862
5863         /* allocate searcher T2 table;
5864            we allocate 1/4 of the T1 size for T2
5865            (T2 is not entered into the ILT) */
5866         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5867
5868         /* Initialize T2 */
5869         for (i = 0; i < 16*1024; i += 64)
5870                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871
5872         /* now fix up the last line in the block to point back to the start of the table */
5873         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5874
5875         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5876         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5877
5878         /* QM queues (128*MAX_CONN) */
5879         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5880 #endif
5881
5882         /* Slow path ring */
5883         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5884
5885         return 0;
5886
5887 alloc_mem_err:
5888         bnx2x_free_mem(bp);
5889         return -ENOMEM;
5890
5891 #undef BNX2X_PCI_ALLOC
5892 #undef BNX2X_ALLOC
5893 }
5894
5895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5896 {
5897         int i;
5898
5899         for_each_queue(bp, i) {
5900                 struct bnx2x_fastpath *fp = &bp->fp[i];
5901
5902                 u16 bd_cons = fp->tx_bd_cons;
5903                 u16 sw_prod = fp->tx_pkt_prod;
5904                 u16 sw_cons = fp->tx_pkt_cons;
5905
5906                 while (sw_cons != sw_prod) {
5907                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5908                         sw_cons++;
5909                 }
5910         }
5911 }
5912
5913 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5914 {
5915         int i, j;
5916
5917         for_each_queue(bp, j) {
5918                 struct bnx2x_fastpath *fp = &bp->fp[j];
5919
5920                 for (i = 0; i < NUM_RX_BD; i++) {
5921                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5922                         struct sk_buff *skb = rx_buf->skb;
5923
5924                         if (skb == NULL)
5925                                 continue;
5926
5927                         pci_unmap_single(bp->pdev,
5928                                          pci_unmap_addr(rx_buf, mapping),
5929                                          bp->rx_buf_use_size,
5930                                          PCI_DMA_FROMDEVICE);
5931
5932                         rx_buf->skb = NULL;
5933                         dev_kfree_skb(skb);
5934                 }
5935                 if (!fp->disable_tpa)
5936                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5937                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5938                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5939         }
5940 }
5941
5942 static void bnx2x_free_skbs(struct bnx2x *bp)
5943 {
5944         bnx2x_free_tx_skbs(bp);
5945         bnx2x_free_rx_skbs(bp);
5946 }
5947
5948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5949 {
5950         int i, offset = 1;
5951
5952         free_irq(bp->msix_table[0].vector, bp->dev);
5953         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954            bp->msix_table[0].vector);
5955
5956         for_each_queue(bp, i) {
5957                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5958                    "state %x\n", i, bp->msix_table[i + offset].vector,
5959                    bnx2x_fp(bp, i, state));
5960
5961                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5962                         BNX2X_ERR("IRQ of fp #%d being freed while "
5963                                   "state != closed\n", i);
5964
5965                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5966         }
5967 }
5968
5969 static void bnx2x_free_irq(struct bnx2x *bp)
5970 {
5971         if (bp->flags & USING_MSIX_FLAG) {
5972                 bnx2x_free_msix_irqs(bp);
5973                 pci_disable_msix(bp->pdev);
5974                 bp->flags &= ~USING_MSIX_FLAG;
5975
5976         } else
5977                 free_irq(bp->pdev->irq, bp->dev);
5978 }
5979
5980 static int bnx2x_enable_msix(struct bnx2x *bp)
5981 {
5982         int i, rc, offset;
5983
5984         bp->msix_table[0].entry = 0;
5985         offset = 1;
5986         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5987
5988         for_each_queue(bp, i) {
5989                 int igu_vec = offset + i + BP_L_ID(bp);
5990
5991                 bp->msix_table[i + offset].entry = igu_vec;
5992                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5993                    "(fastpath #%u)\n", i + offset, igu_vec, i);
5994         }
5995
5996         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997                              bp->num_queues + offset);
5998         if (rc) {
5999                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6000                 return -1;
6001         }
6002         bp->flags |= USING_MSIX_FLAG;
6003
6004         return 0;
6005 }
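
/* Illustrative note: bnx2x_nic_load() below treats a non-zero return
 * as "MSI-X not attainable" and falls back to INT#A with one queue.
 */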
6006
6007 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6008 {
6009         int i, rc, offset = 1;
6010
6011         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6012                          bp->dev->name, bp->dev);
6013         if (rc) {
6014                 BNX2X_ERR("request sp irq failed\n");
6015                 return -EBUSY;
6016         }
6017
6018         for_each_queue(bp, i) {
6019                 rc = request_irq(bp->msix_table[i + offset].vector,
6020                                  bnx2x_msix_fp_int, 0,
6021                                  bp->dev->name, &bp->fp[i]);
6022                 if (rc) {
6023                         BNX2X_ERR("request fp #%d irq failed  rc %d\n",
6024                                   i + offset, rc);
6025                         bnx2x_free_msix_irqs(bp);
6026                         return -EBUSY;
6027                 }
6028
6029                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6030         }
6031
6032         return 0;
6033 }
6034
6035 static int bnx2x_req_irq(struct bnx2x *bp)
6036 {
6037         int rc;
6038
6039         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6040                          bp->dev->name, bp->dev);
6041         if (!rc)
6042                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6043
6044         return rc;
6045 }
6046
6047 /*
6048  * Init service functions
6049  */
6050
6051 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6052 {
6053         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6054         int port = BP_PORT(bp);
6055
6056         /* CAM allocation
6057          * unicasts 0-31:port0 32-63:port1
6058          * multicast 64-127:port0 128-191:port1
6059          */
6060         config->hdr.length_6b = 2;
6061         config->hdr.offset = port ? 32 : 0;
6062         config->hdr.client_id = BP_CL_ID(bp);
6063         config->hdr.reserved1 = 0;
6064
6065         /* primary MAC */
6066         config->config_table[0].cam_entry.msb_mac_addr =
6067                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6068         config->config_table[0].cam_entry.middle_mac_addr =
6069                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6070         config->config_table[0].cam_entry.lsb_mac_addr =
6071                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
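	/* Illustrative example: on a little-endian host, a MAC of
	 * 00:10:18:aa:bb:cc reads as the u16s 0x1000, 0xaa18, 0xccbb;
	 * swab16() turns these into 0x0010, 0x18aa, 0xbbcc, i.e. the CAM
	 * holds the address as big-endian 16-bit words.
	 */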
6072         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6073         if (set)
6074                 config->config_table[0].target_table_entry.flags = 0;
6075         else
6076                 CAM_INVALIDATE(config->config_table[0]);
6077         config->config_table[0].target_table_entry.client_id = 0;
6078         config->config_table[0].target_table_entry.vlan_id = 0;
6079
6080         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6081            (set ? "setting" : "clearing"),
6082            config->config_table[0].cam_entry.msb_mac_addr,
6083            config->config_table[0].cam_entry.middle_mac_addr,
6084            config->config_table[0].cam_entry.lsb_mac_addr);
6085
6086         /* broadcast */
6087         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6088         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6089         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6090         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6091         if (set)
6092                 config->config_table[1].target_table_entry.flags =
6093                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6094         else
6095                 CAM_INVALIDATE(config->config_table[1]);
6096         config->config_table[1].target_table_entry.client_id = 0;
6097         config->config_table[1].target_table_entry.vlan_id = 0;
6098
6099         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6100                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6101                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6102 }
6103
6104 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6105 {
6106         struct mac_configuration_cmd_e1h *config =
6107                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6108
6109         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6110                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6111                 return;
6112         }
6113
6114         /* CAM allocation for E1H
6115          * unicasts: by func number
6116          * multicast: 20+FUNC*20, 20 each
6117          */
6118         config->hdr.length_6b = 1;
6119         config->hdr.offset = BP_FUNC(bp);
6120         config->hdr.client_id = BP_CL_ID(bp);
6121         config->hdr.reserved1 = 0;
6122
6123         /* primary MAC */
6124         config->config_table[0].msb_mac_addr =
6125                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6126         config->config_table[0].middle_mac_addr =
6127                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6128         config->config_table[0].lsb_mac_addr =
6129                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6130         config->config_table[0].client_id = BP_L_ID(bp);
6131         config->config_table[0].vlan_id = 0;
6132         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6133         if (set)
6134                 config->config_table[0].flags = BP_PORT(bp);
6135         else
6136                 config->config_table[0].flags =
6137                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6138
6139         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6140            (set ? "setting" : "clearing"),
6141            config->config_table[0].msb_mac_addr,
6142            config->config_table[0].middle_mac_addr,
6143            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6144
6145         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6146                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6147                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6148 }
6149
6150 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6151                              int *state_p, int poll)
6152 {
6153         /* can take a while if any port is running */
6154         int cnt = 500;
6155
6156         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6157            poll ? "polling" : "waiting", state, idx);
6158
6159         might_sleep();
6160         while (cnt--) {
6161                 if (poll) {
6162                         bnx2x_rx_int(bp->fp, 10);
6163                         /* if index is different from 0
6164                          * the reply for some commands will
6165                          * be on the non-default queue
6166                          */
6167                         if (idx)
6168                                 bnx2x_rx_int(&bp->fp[idx], 10);
6169                 }
6170
6171                 mb(); /* state is changed by bnx2x_sp_event() */
6172                 if (*state_p == state)
6173                         return 0;
6174
6175                 msleep(1);
6176         }
6177
6178         /* timeout! */
6179         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6180                   poll ? "polling" : "waiting", state, idx);
6181 #ifdef BNX2X_STOP_ON_ERROR
6182         bnx2x_panic();
6183 #endif
6184
6185         return -EBUSY;
6186 }
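
/* Illustrative usage: bnx2x_setup_leading() below posts a PORT_SETUP
 * ramrod and then waits for bp->state, updated by bnx2x_sp_event(),
 * to become BNX2X_STATE_OPEN.
 */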
6187
6188 static int bnx2x_setup_leading(struct bnx2x *bp)
6189 {
6190         int rc;
6191
6192         /* reset IGU state */
6193         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6194
6195         /* SETUP ramrod */
6196         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6197
6198         /* Wait for completion */
6199         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6200
6201         return rc;
6202 }
6203
6204 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6205 {
6206         /* reset IGU state */
6207         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6208
6209         /* SETUP ramrod */
6210         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6211         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6212
6213         /* Wait for completion */
6214         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6215                                  &(bp->fp[index].state), 0);
6216 }
6217
6218 static int bnx2x_poll(struct napi_struct *napi, int budget);
6219 static void bnx2x_set_rx_mode(struct net_device *dev);
6220
6221 /* must be called with rtnl_lock */
6222 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6223 {
6224         u32 load_code;
6225         int i, rc;
6226
6227 #ifdef BNX2X_STOP_ON_ERROR
6228         if (unlikely(bp->panic))
6229                 return -EPERM;
6230 #endif
6231
6232         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6233
6234         /* Send LOAD_REQUEST command to MCP.
6235            Returns the type of LOAD command:
6236            if it is the first port to be initialized,
6237            common blocks should be initialized; otherwise they are not.
6238         */
6239         if (!BP_NOMCP(bp)) {
6240                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6241                 if (!load_code) {
6242                         BNX2X_ERR("MCP response failure, aborting\n");
6243                         return -EBUSY;
6244                 }
6245                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6246                         return -EBUSY; /* other port in diagnostic mode */
6247
6248         } else {
6249                 int port = BP_PORT(bp);
6250
6251                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6252                    load_count[0], load_count[1], load_count[2]);
6253                 load_count[0]++;
6254                 load_count[1 + port]++;
6255                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6256                    load_count[0], load_count[1], load_count[2]);
6257                 if (load_count[0] == 1)
6258                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6259                 else if (load_count[1 + port] == 1)
6260                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6261                 else
6262                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6263         }
6264
6265         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6266             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6267                 bp->port.pmf = 1;
6268         else
6269                 bp->port.pmf = 0;
6270         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6271
6272         /* if we can't use MSI-X we only need one fp,
6273          * so try to enable MSI-X with the requested number of fp's
6274          * and fall back to INT#A with one fp
6275          */
6276         if (use_inta) {
6277                 bp->num_queues = 1;
6278
6279         } else {
6280                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6281                         /* user requested number */
6282                         bp->num_queues = use_multi;
6283
6284                 else if (use_multi)
6285                         bp->num_queues = min_t(u32, num_online_cpus(),
6286                                                BP_MAX_QUEUES(bp));
6287                 else
6288                         bp->num_queues = 1;
6289
6290                 if (bnx2x_enable_msix(bp)) {
6291                         /* failed to enable MSI-X */
6292                         bp->num_queues = 1;
6293                         if (use_multi)
6294                                 BNX2X_ERR("Multi requested but failed"
6295                                           " to enable MSI-X\n");
6296                 }
6297         }
6298         DP(NETIF_MSG_IFUP,
6299            "set number of queues to %d\n", bp->num_queues);
6300
6301         if (bnx2x_alloc_mem(bp))
6302                 return -ENOMEM;
6303
6304         for_each_queue(bp, i)
6305                 bnx2x_fp(bp, i, disable_tpa) =
6306                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6307
6308         if (bp->flags & USING_MSIX_FLAG) {
6309                 rc = bnx2x_req_msix_irqs(bp);
6310                 if (rc) {
6311                         pci_disable_msix(bp->pdev);
6312                         goto load_error;
6313                 }
6314         } else {
6315                 bnx2x_ack_int(bp);
6316                 rc = bnx2x_req_irq(bp);
6317                 if (rc) {
6318                         BNX2X_ERR("IRQ request failed, aborting\n");
6319                         goto load_error;
6320                 }
6321         }
6322
6323         for_each_queue(bp, i)
6324                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6325                                bnx2x_poll, 128);
6326
6327         /* Initialize HW */
6328         rc = bnx2x_init_hw(bp, load_code);
6329         if (rc) {
6330                 BNX2X_ERR("HW init failed, aborting\n");
6331                 goto load_error;
6332         }
6333
6334         /* Setup NIC internals and enable interrupts */
6335         bnx2x_nic_init(bp, load_code);
6336
6337         /* Send LOAD_DONE command to MCP */
6338         if (!BP_NOMCP(bp)) {
6339                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6340                 if (!load_code) {
6341                         BNX2X_ERR("MCP response failure, aborting\n");
6342                         rc = -EBUSY;
6343                         goto load_int_disable;
6344                 }
6345         }
6346
6347         bnx2x_stats_init(bp);
6348
6349         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6350
6351         /* Enable Rx interrupt handling before sending the ramrod
6352            as it's completed on Rx FP queue */
6353         for_each_queue(bp, i)
6354                 napi_enable(&bnx2x_fp(bp, i, napi));
6355
6356         /* Enable interrupt handling */
6357         atomic_set(&bp->intr_sem, 0);
6358
6359         rc = bnx2x_setup_leading(bp);
6360         if (rc) {
6361                 BNX2X_ERR("Setup leading failed!\n");
6362                 goto load_stop_netif;
6363         }
6364
6365         if (CHIP_IS_E1H(bp))
6366                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6367                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6368                         bp->state = BNX2X_STATE_DISABLED;
6369                 }
6370
6371         if (bp->state == BNX2X_STATE_OPEN)
6372                 for_each_nondefault_queue(bp, i) {
6373                         rc = bnx2x_setup_multi(bp, i);
6374                         if (rc)
6375                                 goto load_stop_netif;
6376                 }
6377
6378         if (CHIP_IS_E1(bp))
6379                 bnx2x_set_mac_addr_e1(bp, 1);
6380         else
6381                 bnx2x_set_mac_addr_e1h(bp, 1);
6382
6383         if (bp->port.pmf)
6384                 bnx2x_initial_phy_init(bp);
6385
6386         /* Start fast path */
6387         switch (load_mode) {
6388         case LOAD_NORMAL:
6389                 /* Tx queue should only be re-enabled */
6390                 netif_wake_queue(bp->dev);
6391                 bnx2x_set_rx_mode(bp->dev);
6392                 break;
6393
6394         case LOAD_OPEN:
6395                 netif_start_queue(bp->dev);
6396                 bnx2x_set_rx_mode(bp->dev);
6397                 if (bp->flags & USING_MSIX_FLAG)
6398                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6399                                bp->dev->name);
6400                 break;
6401
6402         case LOAD_DIAG:
6403                 bnx2x_set_rx_mode(bp->dev);
6404                 bp->state = BNX2X_STATE_DIAG;
6405                 break;
6406
6407         default:
6408                 break;
6409         }
6410
6411         if (!bp->port.pmf)
6412                 bnx2x__link_status_update(bp);
6413
6414         /* start the timer */
6415         mod_timer(&bp->timer, jiffies + bp->current_interval);
6416
6417
6418         return 0;
6419
6420 load_stop_netif:
6421         for_each_queue(bp, i)
6422                 napi_disable(&bnx2x_fp(bp, i, napi));
6423
6424 load_int_disable:
6425         bnx2x_int_disable_sync(bp);
6426
6427         /* Release IRQs */
6428         bnx2x_free_irq(bp);
6429
6430         /* Free SKBs, SGEs, TPA pool and driver internals */
6431         bnx2x_free_skbs(bp);
6432         for_each_queue(bp, i)
6433                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6434                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6435 load_error:
6436         bnx2x_free_mem(bp);
6437
6438         /* TBD we really need to reset the chip
6439            if we want to recover from this */
6440         return rc;
6441 }
6442
6443 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6444 {
6445         int rc;
6446
6447         /* halt the connection */
6448         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6449         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6450
6451         /* Wait for completion */
6452         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6453                                &(bp->fp[index].state), 1);
6454         if (rc) /* timeout */
6455                 return rc;
6456
6457         /* delete cfc entry */
6458         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6459
6460         /* Wait for completion */
6461         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6462                                &(bp->fp[index].state), 1);
6463         return rc;
6464 }
6465
6466 static int bnx2x_stop_leading(struct bnx2x *bp)
6467 {
6468         u16 dsb_sp_prod_idx;
6469         /* if the other port is handling traffic,
6470            this can take a lot of time */
6471         int cnt = 500;
6472         int rc;
6473
6474         might_sleep();
6475
6476         /* Send HALT ramrod */
6477         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6478         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6479
6480         /* Wait for completion */
6481         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6482                                &(bp->fp[0].state), 1);
6483         if (rc) /* timeout */
6484                 return rc;
6485
6486         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6487
6488         /* Send PORT_DELETE ramrod */
6489         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6490
6491         /* Wait for completion to arrive on the default status block;
6492            we are going to reset the chip anyway,
6493            so there is not much to do if this times out
6494          */
6495         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6496                 if (!cnt) {
6497                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6498                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6499                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6500 #ifdef BNX2X_STOP_ON_ERROR
6501                         bnx2x_panic();
6502 #else
6503                         rc = -EBUSY;
6504 #endif
6505                         break;
6506                 }
6507                 cnt--;
6508                 msleep(1);
6509         }
6510         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6511         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6512
6513         return rc;
6514 }
6515
6516 static void bnx2x_reset_func(struct bnx2x *bp)
6517 {
6518         int port = BP_PORT(bp);
6519         int func = BP_FUNC(bp);
6520         int base, i;
6521
6522         /* Configure IGU */
6523         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6524         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6525
6526         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6527
6528         /* Clear ILT */
6529         base = FUNC_ILT_BASE(func);
6530         for (i = base; i < base + ILT_PER_FUNC; i++)
6531                 bnx2x_ilt_wr(bp, i, 0);
6532 }
6533
6534 static void bnx2x_reset_port(struct bnx2x *bp)
6535 {
6536         int port = BP_PORT(bp);
6537         u32 val;
6538
6539         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6540
6541         /* Do not rcv packets to BRB */
6542         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6543         /* Do not direct rcv packets that are not for MCP to the BRB */
6544         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6545                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6546
6547         /* Configure AEU */
6548         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6549
6550         msleep(100);
6551         /* Check for BRB port occupancy */
6552         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6553         if (val)
6554                 DP(NETIF_MSG_IFDOWN,
6555                    "BRB1 is not empty  %d blocks are occupied\n", val);
6556
6557         /* TODO: Close Doorbell port? */
6558 }
6559
6560 static void bnx2x_reset_common(struct bnx2x *bp)
6561 {
6562         /* reset_common */
6563         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6564                0xd3ffff7f);
6565         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6566 }
6567
6568 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6569 {
6570         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6571            BP_FUNC(bp), reset_code);
6572
6573         switch (reset_code) {
6574         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6575                 bnx2x_reset_port(bp);
6576                 bnx2x_reset_func(bp);
6577                 bnx2x_reset_common(bp);
6578                 break;
6579
6580         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6581                 bnx2x_reset_port(bp);
6582                 bnx2x_reset_func(bp);
6583                 break;
6584
6585         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6586                 bnx2x_reset_func(bp);
6587                 break;
6588
6589         default:
6590                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6591                 break;
6592         }
6593 }
6594
6595 /* must be called with rtnl_lock */
6596 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6597 {
6598         int port = BP_PORT(bp);
6599         u32 reset_code = 0;
6600         int i, cnt, rc;
6601
6602         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6603
6604         bp->rx_mode = BNX2X_RX_MODE_NONE;
6605         bnx2x_set_storm_rx_mode(bp);
6606
6607         if (netif_running(bp->dev)) {
6608                 netif_tx_disable(bp->dev);
6609                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6610         }
6611
6612         del_timer_sync(&bp->timer);
6613         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6614                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6615         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6616
6617         /* Wait until tx fast path tasks complete */
6618         for_each_queue(bp, i) {
6619                 struct bnx2x_fastpath *fp = &bp->fp[i];
6620
6621                 cnt = 1000;
6622                 smp_rmb();
6623                 while (BNX2X_HAS_TX_WORK(fp)) {
6624
6625                         if (!netif_running(bp->dev))
6626                                 bnx2x_tx_int(fp, 1000);
6627
6628                         if (!cnt) {
6629                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6630                                           i);
6631 #ifdef BNX2X_STOP_ON_ERROR
6632                                 bnx2x_panic();
6633                                 return -EBUSY;
6634 #else
6635                                 break;
6636 #endif
6637                         }
6638                         cnt--;
6639                         msleep(1);
6640                         smp_rmb();
6641                 }
6642         }
6643
6644         /* Give HW time to discard old tx messages */
6645         msleep(1);
6646
6647         for_each_queue(bp, i)
6648                 napi_disable(&bnx2x_fp(bp, i, napi));
6649         /* Disable interrupts after Tx and Rx are disabled on stack level */
6650         bnx2x_int_disable_sync(bp);
6651
6652         /* Release IRQs */
6653         bnx2x_free_irq(bp);
6654
6655         if (unload_mode == UNLOAD_NORMAL)
6656                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6657
6658         else if (bp->flags & NO_WOL_FLAG) {
6659                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6660                 if (CHIP_IS_E1H(bp))
6661                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6662
6663         } else if (bp->wol) {
6664                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6665                 u8 *mac_addr = bp->dev->dev_addr;
6666                 u32 val;
6667                 /* The mac address is written to entries 1-4 to
6668                    preserve entry 0 which is used by the PMF */
6669                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6670
6671                 val = (mac_addr[0] << 8) | mac_addr[1];
6672                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6673
6674                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6675                       (mac_addr[4] << 8) | mac_addr[5];
6676                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6677
6678                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6679
6680         } else
6681                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6682
6683         if (CHIP_IS_E1(bp)) {
6684                 struct mac_configuration_cmd *config =
6685                                                 bnx2x_sp(bp, mcast_config);
6686
6687                 bnx2x_set_mac_addr_e1(bp, 0);
6688
6689                 for (i = 0; i < config->hdr.length_6b; i++)
6690                         CAM_INVALIDATE(config->config_table[i]);
6691
6692                 config->hdr.length_6b = i;
6693                 if (CHIP_REV_IS_SLOW(bp))
6694                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6695                 else
6696                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6697                 config->hdr.client_id = BP_CL_ID(bp);
6698                 config->hdr.reserved1 = 0;
6699
6700                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6701                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6702                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6703
6704         } else { /* E1H */
6705                 bnx2x_set_mac_addr_e1h(bp, 0);
6706
6707                 for (i = 0; i < MC_HASH_SIZE; i++)
6708                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6709         }
6710
6711         if (CHIP_IS_E1H(bp))
6712                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6713
6714         /* Close multi and leading connections;
6715            completions for ramrods are collected in a synchronous way */
6716         for_each_nondefault_queue(bp, i)
6717                 if (bnx2x_stop_multi(bp, i))
6718                         goto unload_error;
6719
6720         rc = bnx2x_stop_leading(bp);
6721         if (rc) {
6722                 BNX2X_ERR("Stop leading failed!\n");
6723 #ifdef BNX2X_STOP_ON_ERROR
6724                 return -EBUSY;
6725 #else
6726                 goto unload_error;
6727 #endif
6728         }
6729
6730 unload_error:
6731         if (!BP_NOMCP(bp))
6732                 reset_code = bnx2x_fw_command(bp, reset_code);
6733         else {
6734                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6735                    load_count[0], load_count[1], load_count[2]);
6736                 load_count[0]--;
6737                 load_count[1 + port]--;
6738                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6739                    load_count[0], load_count[1], load_count[2]);
6740                 if (load_count[0] == 0)
6741                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6742                 else if (load_count[1 + port] == 0)
6743                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6744                 else
6745                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6746         }
6747
6748         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6749             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6750                 bnx2x__link_reset(bp);
6751
6752         /* Reset the chip */
6753         bnx2x_reset_chip(bp, reset_code);
6754
6755         /* Report UNLOAD_DONE to MCP */
6756         if (!BP_NOMCP(bp))
6757                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6758
6759         /* Free SKBs, SGEs, TPA pool and driver internals */
6760         bnx2x_free_skbs(bp);
6761         for_each_queue(bp, i)
6762                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6763                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6764         bnx2x_free_mem(bp);
6765
6766         bp->state = BNX2X_STATE_CLOSED;
6767
6768         netif_carrier_off(bp->dev);
6769
6770         return 0;
6771 }
6772
6773 static void bnx2x_reset_task(struct work_struct *work)
6774 {
6775         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6776
6777 #ifdef BNX2X_STOP_ON_ERROR
6778         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6779                   " so reset not done to allow debug dump,\n"
6780          KERN_ERR " you will need to reboot when done\n");
6781         return;
6782 #endif
6783
6784         rtnl_lock();
6785
6786         if (!netif_running(bp->dev))
6787                 goto reset_task_exit;
6788
6789         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6790         bnx2x_nic_load(bp, LOAD_NORMAL);
6791
6792 reset_task_exit:
6793         rtnl_unlock();
6794 }
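/* A sketch of how this task is expected to be scheduled (illustrative;
 * the actual trigger lives elsewhere in the driver, typically the TX
 * timeout handler):
 *
 *      schedule_work(&bp->reset_task);
 *
 * Deferring to process context lets the unload/load cycle run under
 * rtnl_lock() and sleep, which the watchdog context cannot.
 */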
6795
6796 /* end of nic load/unload */
6797
6798 /* ethtool_ops */
6799
6800 /*
6801  * Init service functions
6802  */
6803
6804 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6805 {
6806         u32 val;
6807
6808         /* Check if there is any driver already loaded */
6809         val = REG_RD(bp, MISC_REG_UNPREPARED);
6810         if (val == 0x1) {
6811                 /* Check if it is the UNDI driver
6812                  * UNDI driver initializes CID offset for normal bell to 0x7
6813                  */
6814                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6815                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6816                 if (val == 0x7) {
6817                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6818                         /* save our func */
6819                         int func = BP_FUNC(bp);
6820                         u32 swap_en;
6821                         u32 swap_val;
6822
6823                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6824
6825                         /* try unload UNDI on port 0 */
6826                         bp->func = 0;
6827                         bp->fw_seq =
6828                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6829                                 DRV_MSG_SEQ_NUMBER_MASK);
6830                         reset_code = bnx2x_fw_command(bp, reset_code);
6831
6832                         /* if UNDI is loaded on the other port */
6833                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6834
6835                                 /* send "DONE" for previous unload */
6836                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6837
6838                                 /* unload UNDI on port 1 */
6839                                 bp->func = 1;
6840                                 bp->fw_seq =
6841                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6842                                         DRV_MSG_SEQ_NUMBER_MASK);
6843                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6844
6845                                 bnx2x_fw_command(bp, reset_code);
6846                         }
6847
6848                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6849                                     HC_REG_CONFIG_0), 0x1000);
6850
6851                         /* close input traffic and wait for it */
6852                         /* Do not rcv packets to BRB */
6853                         REG_WR(bp,
6854                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6855                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6856                         /* Do not direct rcv packets that are not for MCP to
6857                          * the BRB */
6858                         REG_WR(bp,
6859                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6860                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6861                         /* clear AEU */
6862                         REG_WR(bp,
6863                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6864                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6865                         msleep(10);
6866
6867                         /* save NIG port swap info */
6868                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6869                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6870                         /* reset device */
6871                         REG_WR(bp,
6872                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6873                                0xd3ffffff);
6874                         REG_WR(bp,
6875                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6876                                0x1403);
6877                         /* take the NIG out of reset and restore swap values */
6878                         REG_WR(bp,
6879                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6880                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6881                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6882                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6883
6884                         /* send unload done to the MCP */
6885                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6886
6887                         /* restore our func and fw_seq */
6888                         bp->func = func;
6889                         bp->fw_seq =
6890                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6891                                 DRV_MSG_SEQ_NUMBER_MASK);
6892                 }
6893                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6894         }
6895 }
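/* Summary of the UNDI takeover above: MISC_REG_UNPREPARED reading 1 means
 * some driver has already initialized the chip, and a normal-doorbell CID
 * offset of 0x7 is the fingerprint the pre-boot UNDI driver leaves behind.
 * The recovery then runs the regular MCP unload handshake for both ports,
 * blocks new traffic into the BRB, resets the chip while preserving the
 * NIG port-swap straps, and finally restores this function's own firmware
 * mailbox sequence number.
 */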
6896
6897 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6898 {
6899         u32 val, val2, val3, val4, id;
6900         u16 pmc;
6901
6902         /* Get the chip revision id and number. */
6903         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6904         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6905         id = ((val & 0xffff) << 16);
6906         val = REG_RD(bp, MISC_REG_CHIP_REV);
6907         id |= ((val & 0xf) << 12);
6908         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6909         id |= ((val & 0xff) << 4);
6910         val = REG_RD(bp, MISC_REG_BOND_ID);
6911         id |= (val & 0xf);
6912         bp->common.chip_id = id;
6913         bp->link_params.chip_id = bp->common.chip_id;
6914         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
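        /* Worked example of the chip_id layout assembled above
         * (illustrative values): chip num 0x164e, rev 0, metal 0 and
         * bond_id 0 give
         *
         *      id = (0x164e << 16) | (0 << 12) | (0 << 4) | 0 = 0x164e0000
         *
         * so CHIP_NUM()/CHIP_REV() style masks can pull their field
         * straight out of bp->common.chip_id.
         */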
6915
6916         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6917         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6918                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6919         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6920                        bp->common.flash_size, bp->common.flash_size);
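        /* The MCPR_NVM_CFG4_FLASH_SIZE field above encodes the flash size
         * as a power-of-two multiple of the NVRAM_1MB_SIZE base unit,
         * i.e. flash_size = base << field, so a field value of 2 means
         * four base units.
         */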
6921
6922         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6923         bp->link_params.shmem_base = bp->common.shmem_base;
6924         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6925
6926         if (!bp->common.shmem_base ||
6927             (bp->common.shmem_base < 0xA0000) ||
6928             (bp->common.shmem_base >= 0xC0000)) {
6929                 BNX2X_DEV_INFO("MCP not active\n");
6930                 bp->flags |= NO_MCP_FLAG;
6931                 return;
6932         }
6933
6934         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6935         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6937                 BNX2X_ERR("BAD MCP validity signature\n");
6938
6939         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6940         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6941
6942         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6943                        bp->common.hw_config, bp->common.board);
6944
6945         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6946                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6947                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6948
6949         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6950         bp->common.bc_ver = val;
6951         BNX2X_DEV_INFO("bc_ver %X\n", val);
6952         if (val < BNX2X_BC_VER) {
6953                 /* for now only warn
6954                  * later we might need to enforce this */
6955                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6956                           " please upgrade BC\n", BNX2X_BC_VER, val);
6957         }
6958
6959         if (BP_E1HVN(bp) == 0) {
6960                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6961                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6962         } else {
6963                 /* no WOL capability for E1HVN != 0 */
6964                 bp->flags |= NO_WOL_FLAG;
6965         }
6966         BNX2X_DEV_INFO("%sWoL capable\n",
6967                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6968
6969         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6970         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6971         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6972         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6973
6974         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6975                val, val2, val3, val4);
6976 }
6977
6978 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6979                                                     u32 switch_cfg)
6980 {
6981         int port = BP_PORT(bp);
6982         u32 ext_phy_type;
6983
6984         switch (switch_cfg) {
6985         case SWITCH_CFG_1G:
6986                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6987
6988                 ext_phy_type =
6989                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6990                 switch (ext_phy_type) {
6991                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6992                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6993                                        ext_phy_type);
6994
6995                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6996                                                SUPPORTED_10baseT_Full |
6997                                                SUPPORTED_100baseT_Half |
6998                                                SUPPORTED_100baseT_Full |
6999                                                SUPPORTED_1000baseT_Full |
7000                                                SUPPORTED_2500baseX_Full |
7001                                                SUPPORTED_TP |
7002                                                SUPPORTED_FIBRE |
7003                                                SUPPORTED_Autoneg |
7004                                                SUPPORTED_Pause |
7005                                                SUPPORTED_Asym_Pause);
7006                         break;
7007
7008                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7009                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7010                                        ext_phy_type);
7011
7012                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7013                                                SUPPORTED_10baseT_Full |
7014                                                SUPPORTED_100baseT_Half |
7015                                                SUPPORTED_100baseT_Full |
7016                                                SUPPORTED_1000baseT_Full |
7017                                                SUPPORTED_TP |
7018                                                SUPPORTED_FIBRE |
7019                                                SUPPORTED_Autoneg |
7020                                                SUPPORTED_Pause |
7021                                                SUPPORTED_Asym_Pause);
7022                         break;
7023
7024                 default:
7025                         BNX2X_ERR("NVRAM config error. "
7026                                   "BAD SerDes ext_phy_config 0x%x\n",
7027                                   bp->link_params.ext_phy_config);
7028                         return;
7029                 }
7030
7031                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7032                                            port*0x10);
7033                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7034                 break;
7035
7036         case SWITCH_CFG_10G:
7037                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7038
7039                 ext_phy_type =
7040                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7041                 switch (ext_phy_type) {
7042                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7043                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7044                                        ext_phy_type);
7045
7046                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7047                                                SUPPORTED_10baseT_Full |
7048                                                SUPPORTED_100baseT_Half |
7049                                                SUPPORTED_100baseT_Full |
7050                                                SUPPORTED_1000baseT_Full |
7051                                                SUPPORTED_2500baseX_Full |
7052                                                SUPPORTED_10000baseT_Full |
7053                                                SUPPORTED_TP |
7054                                                SUPPORTED_FIBRE |
7055                                                SUPPORTED_Autoneg |
7056                                                SUPPORTED_Pause |
7057                                                SUPPORTED_Asym_Pause);
7058                         break;
7059
7060                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7061                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7062                                        ext_phy_type);
7063
7064                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7065                                                SUPPORTED_FIBRE |
7066                                                SUPPORTED_Pause |
7067                                                SUPPORTED_Asym_Pause);
7068                         break;
7069
7070                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7071                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7072                                        ext_phy_type);
7073
7074                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7075                                                SUPPORTED_1000baseT_Full |
7076                                                SUPPORTED_FIBRE |
7077                                                SUPPORTED_Pause |
7078                                                SUPPORTED_Asym_Pause);
7079                         break;
7080
7081                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7082                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7083                                        ext_phy_type);
7084
7085                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7086                                                SUPPORTED_1000baseT_Full |
7087                                                SUPPORTED_FIBRE |
7088                                                SUPPORTED_Autoneg |
7089                                                SUPPORTED_Pause |
7090                                                SUPPORTED_Asym_Pause);
7091                         break;
7092
7093                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7094                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7095                                        ext_phy_type);
7096
7097                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7098                                                SUPPORTED_2500baseX_Full |
7099                                                SUPPORTED_1000baseT_Full |
7100                                                SUPPORTED_FIBRE |
7101                                                SUPPORTED_Autoneg |
7102                                                SUPPORTED_Pause |
7103                                                SUPPORTED_Asym_Pause);
7104                         break;
7105
7106                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7107                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7108                                        ext_phy_type);
7109
7110                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7111                                                SUPPORTED_TP |
7112                                                SUPPORTED_Autoneg |
7113                                                SUPPORTED_Pause |
7114                                                SUPPORTED_Asym_Pause);
7115                         break;
7116
7117                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7118                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7119                                   bp->link_params.ext_phy_config);
7120                         break;
7121
7122                 default:
7123                         BNX2X_ERR("NVRAM config error. "
7124                                   "BAD XGXS ext_phy_config 0x%x\n",
7125                                   bp->link_params.ext_phy_config);
7126                         return;
7127                 }
7128
7129                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7130                                            port*0x18);
7131                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7132
7133                 break;
7134
7135         default:
7136                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7137                           bp->port.link_config);
7138                 return;
7139         }
7140         bp->link_params.phy_addr = bp->port.phy_addr;
7141
7142         /* mask what we support according to speed_cap_mask */
7143         if (!(bp->link_params.speed_cap_mask &
7144                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7145                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7146
7147         if (!(bp->link_params.speed_cap_mask &
7148                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7149                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7150
7151         if (!(bp->link_params.speed_cap_mask &
7152                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7153                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7154
7155         if (!(bp->link_params.speed_cap_mask &
7156                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7157                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7158
7159         if (!(bp->link_params.speed_cap_mask &
7160                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7161                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7162                                         SUPPORTED_1000baseT_Full);
7163
7164         if (!(bp->link_params.speed_cap_mask &
7165                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7166                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7167
7168         if (!(bp->link_params.speed_cap_mask &
7169                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7170                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7171
7172         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7173 }
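/* Example of the speed_cap_mask filtering above (illustrative NVRAM
 * value): a mask with only D0_1G and D0_10G set clears every 10/100
 * SUPPORTED_* bit and SUPPORTED_2500baseX_Full, so ethtool reports just
 * the 1G/10G modes the board is actually wired for.
 */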
7174
7175 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7176 {
7177         bp->link_params.req_duplex = DUPLEX_FULL;
7178
7179         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7180         case PORT_FEATURE_LINK_SPEED_AUTO:
7181                 if (bp->port.supported & SUPPORTED_Autoneg) {
7182                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7183                         bp->port.advertising = bp->port.supported;
7184                 } else {
7185                         u32 ext_phy_type =
7186                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7187
7188                         if ((ext_phy_type ==
7189                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7190                             (ext_phy_type ==
7191                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7192                                 /* force 10G, no AN */
7193                                 bp->link_params.req_line_speed = SPEED_10000;
7194                                 bp->port.advertising =
7195                                                 (ADVERTISED_10000baseT_Full |
7196                                                  ADVERTISED_FIBRE);
7197                                 break;
7198                         }
7199                         BNX2X_ERR("NVRAM config error. "
7200                                   "Invalid link_config 0x%x"
7201                                   "  Autoneg not supported\n",
7202                                   bp->port.link_config);
7203                         return;
7204                 }
7205                 break;
7206
7207         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7208                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7209                         bp->link_params.req_line_speed = SPEED_10;
7210                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7211                                                 ADVERTISED_TP);
7212                 } else {
7213                         BNX2X_ERR("NVRAM config error. "
7214                                   "Invalid link_config 0x%x"
7215                                   "  speed_cap_mask 0x%x\n",
7216                                   bp->port.link_config,
7217                                   bp->link_params.speed_cap_mask);
7218                         return;
7219                 }
7220                 break;
7221
7222         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7223                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7224                         bp->link_params.req_line_speed = SPEED_10;
7225                         bp->link_params.req_duplex = DUPLEX_HALF;
7226                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7227                                                 ADVERTISED_TP);
7228                 } else {
7229                         BNX2X_ERR("NVRAM config error. "
7230                                   "Invalid link_config 0x%x"
7231                                   "  speed_cap_mask 0x%x\n",
7232                                   bp->port.link_config,
7233                                   bp->link_params.speed_cap_mask);
7234                         return;
7235                 }
7236                 break;
7237
7238         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7239                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7240                         bp->link_params.req_line_speed = SPEED_100;
7241                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7242                                                 ADVERTISED_TP);
7243                 } else {
7244                         BNX2X_ERR("NVRAM config error. "
7245                                   "Invalid link_config 0x%x"
7246                                   "  speed_cap_mask 0x%x\n",
7247                                   bp->port.link_config,
7248                                   bp->link_params.speed_cap_mask);
7249                         return;
7250                 }
7251                 break;
7252
7253         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7254                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7255                         bp->link_params.req_line_speed = SPEED_100;
7256                         bp->link_params.req_duplex = DUPLEX_HALF;
7257                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7258                                                 ADVERTISED_TP);
7259                 } else {
7260                         BNX2X_ERR("NVRAM config error. "
7261                                   "Invalid link_config 0x%x"
7262                                   "  speed_cap_mask 0x%x\n",
7263                                   bp->port.link_config,
7264                                   bp->link_params.speed_cap_mask);
7265                         return;
7266                 }
7267                 break;
7268
7269         case PORT_FEATURE_LINK_SPEED_1G:
7270                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7271                         bp->link_params.req_line_speed = SPEED_1000;
7272                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7273                                                 ADVERTISED_TP);
7274                 } else {
7275                         BNX2X_ERR("NVRAM config error. "
7276                                   "Invalid link_config 0x%x"
7277                                   "  speed_cap_mask 0x%x\n",
7278                                   bp->port.link_config,
7279                                   bp->link_params.speed_cap_mask);
7280                         return;
7281                 }
7282                 break;
7283
7284         case PORT_FEATURE_LINK_SPEED_2_5G:
7285                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7286                         bp->link_params.req_line_speed = SPEED_2500;
7287                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7288                                                 ADVERTISED_TP);
7289                 } else {
7290                         BNX2X_ERR("NVRAM config error. "
7291                                   "Invalid link_config 0x%x"
7292                                   "  speed_cap_mask 0x%x\n",
7293                                   bp->port.link_config,
7294                                   bp->link_params.speed_cap_mask);
7295                         return;
7296                 }
7297                 break;
7298
7299         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7300         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7301         case PORT_FEATURE_LINK_SPEED_10G_KR:
7302                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7303                         bp->link_params.req_line_speed = SPEED_10000;
7304                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7305                                                 ADVERTISED_FIBRE);
7306                 } else {
7307                         BNX2X_ERR("NVRAM config error. "
7308                                   "Invalid link_config 0x%x"
7309                                   "  speed_cap_mask 0x%x\n",
7310                                   bp->port.link_config,
7311                                   bp->link_params.speed_cap_mask);
7312                         return;
7313                 }
7314                 break;
7315
7316         default:
7317                 BNX2X_ERR("NVRAM config error. "
7318                           "BAD link speed link_config 0x%x\n",
7319                           bp->port.link_config);
7320                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7321                 bp->port.advertising = bp->port.supported;
7322                 break;
7323         }
7324
7325         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7326                                          PORT_FEATURE_FLOW_CONTROL_MASK);
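        /* FLOW_CTRL_AUTO is resolved by pause autonegotiation; if the
         * PHY cannot autoneg at all there is nothing to resolve against,
         * so fall back to no flow control instead.
         */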
7327         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7328             !(bp->port.supported & SUPPORTED_Autoneg))
7329                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7330
7331         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7332                        "  advertising 0x%x\n",
7333                        bp->link_params.req_line_speed,
7334                        bp->link_params.req_duplex,
7335                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7336 }
7337
7338 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7339 {
7340         int port = BP_PORT(bp);
7341         u32 val, val2;
7342
7343         bp->link_params.bp = bp;
7344         bp->link_params.port = port;
7345
7346         bp->link_params.serdes_config =
7347                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7348         bp->link_params.lane_config =
7349                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7350         bp->link_params.ext_phy_config =
7351                 SHMEM_RD(bp,
7352                          dev_info.port_hw_config[port].external_phy_config);
7353         bp->link_params.speed_cap_mask =
7354                 SHMEM_RD(bp,
7355                          dev_info.port_hw_config[port].speed_capability_mask);
7356
7357         bp->port.link_config =
7358                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7359
7360         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7361              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7362                        "  link_config 0x%08x\n",
7363                        bp->link_params.serdes_config,
7364                        bp->link_params.lane_config,
7365                        bp->link_params.ext_phy_config,
7366                        bp->link_params.speed_cap_mask, bp->port.link_config);
7367
7368         bp->link_params.switch_cfg = (bp->port.link_config &
7369                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7370         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7371
7372         bnx2x_link_settings_requested(bp);
7373
7374         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7375         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7376         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7377         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7378         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7379         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7380         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7381         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7382         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7383         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7384 }
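/* The shmem MAC is split across two 32-bit words: mac_upper carries the
 * two most significant bytes in its low 16 bits, mac_lower the remaining
 * four.  Illustrative packing for 00:10:18:aa:bb:cc:
 *
 *      mac_upper = 0x00000010          mac_lower = 0x18aabbcc
 *
 * which the shifts above unpack into dev_addr[0..5] in network order.
 */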
7385
7386 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7387 {
7388         int func = BP_FUNC(bp);
7389         u32 val, val2;
7390         int rc = 0;
7391
7392         bnx2x_get_common_hwinfo(bp);
7393
7394         bp->e1hov = 0;
7395         bp->e1hmf = 0;
7396         if (CHIP_IS_E1H(bp)) {
7397                 bp->mf_config =
7398                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7399
7400                 val =
7401                    (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7402                     FUNC_MF_CFG_E1HOV_TAG_MASK);
7403                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7404
7405                         bp->e1hov = val;
7406                         bp->e1hmf = 1;
7407                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7408                                        "(0x%04x)\n",
7409                                        func, bp->e1hov, bp->e1hov);
7410                 } else {
7411                         BNX2X_DEV_INFO("Single function mode\n");
7412                         if (BP_E1HVN(bp)) {
7413                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7414                                           "  aborting\n", func);
7415                                 rc = -EPERM;
7416                         }
7417                 }
7418         }
7419
7420         if (!BP_NOMCP(bp)) {
7421                 bnx2x_get_port_hwinfo(bp);
7422
7423                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7424                               DRV_MSG_SEQ_NUMBER_MASK);
7425                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7426         }
7427
7428         if (IS_E1HMF(bp)) {
7429                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7430                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7431                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7432                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7433                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7434                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7435                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7436                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7437                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7438                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7439                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7440                                ETH_ALEN);
7441                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7442                                ETH_ALEN);
7443                 }
7444
7445                 return rc;
7446         }
7447
7448         if (BP_NOMCP(bp)) {
7449                 /* only supposed to happen on emulation/FPGA */
7450                 BNX2X_ERR("warning random MAC workaround active\n");
7451                 random_ether_addr(bp->dev->dev_addr);
7452                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7453         }
7454
7455         return rc;
7456 }
7457
7458 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7459 {
7460         int func = BP_FUNC(bp);
7461         int rc;
7462
7463         /* Disable interrupt handling until HW is initialized */
7464         atomic_set(&bp->intr_sem, 1);
7465
7466         mutex_init(&bp->port.phy_mutex);
7467
7468         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7469         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7470
7471         rc = bnx2x_get_hwinfo(bp);
7472
7473         /* need to reset chip if undi was active */
7474         if (!BP_NOMCP(bp))
7475                 bnx2x_undi_unload(bp);
7476
7477         if (CHIP_REV_IS_FPGA(bp))
7478                 printk(KERN_ERR PFX "FPGA detected\n");
7479
7480         if (BP_NOMCP(bp) && (func == 0))
7481                 printk(KERN_ERR PFX
7482                        "MCP disabled, must load devices in order!\n");
7483
7484         /* Set TPA flags */
7485         if (disable_tpa) {
7486                 bp->flags &= ~TPA_ENABLE_FLAG;
7487                 bp->dev->features &= ~NETIF_F_LRO;
7488         } else {
7489                 bp->flags |= TPA_ENABLE_FLAG;
7490                 bp->dev->features |= NETIF_F_LRO;
7491         }
7492
7494         bp->tx_ring_size = MAX_TX_AVAIL;
7495         bp->rx_ring_size = MAX_RX_AVAIL;
7496
7497         bp->rx_csum = 1;
7498         bp->rx_offset = 0;
7499
7500         bp->tx_ticks = 50;
7501         bp->rx_ticks = 25;
7502
7503         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7504         bp->current_interval = (poll ? poll : bp->timer_interval);
7505
7506         init_timer(&bp->timer);
7507         bp->timer.expires = jiffies + bp->current_interval;
7508         bp->timer.data = (unsigned long) bp;
7509         bp->timer.function = bnx2x_timer;
7510
7511         return rc;
7512 }
7513
7514 /*
7515  * ethtool service functions
7516  */
7517
7518 /* All ethtool functions called with rtnl_lock */
7519
7520 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7521 {
7522         struct bnx2x *bp = netdev_priv(dev);
7523
7524         cmd->supported = bp->port.supported;
7525         cmd->advertising = bp->port.advertising;
7526
7527         if (netif_carrier_ok(dev)) {
7528                 cmd->speed = bp->link_vars.line_speed;
7529                 cmd->duplex = bp->link_vars.duplex;
7530         } else {
7531                 cmd->speed = bp->link_params.req_line_speed;
7532                 cmd->duplex = bp->link_params.req_duplex;
7533         }
7534         if (IS_E1HMF(bp)) {
7535                 u16 vn_max_rate;
7536
7537                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7538                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7539                 if (vn_max_rate < cmd->speed)
7540                         cmd->speed = vn_max_rate;
7541         }
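        /* FUNC_MF_CFG_MAX_BW above is in units of 100 Mbps: e.g. a raw
         * field value of 25 caps the speed reported to ethtool at 2500
         * for this function in multi-function (E1H MF) mode.
         */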
7542
7543         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7544                 u32 ext_phy_type =
7545                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7546
7547                 switch (ext_phy_type) {
7548                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7549                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7550                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7551                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7552                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7553                         cmd->port = PORT_FIBRE;
7554                         break;
7555
7556                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7557                         cmd->port = PORT_TP;
7558                         break;
7559
7560                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7561                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7562                                   bp->link_params.ext_phy_config);
7563                         break;
7564
7565                 default:
7566                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7567                            bp->link_params.ext_phy_config);
7568                         break;
7569                 }
7570         } else
7571                 cmd->port = PORT_TP;
7572
7573         cmd->phy_address = bp->port.phy_addr;
7574         cmd->transceiver = XCVR_INTERNAL;
7575
7576         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7577                 cmd->autoneg = AUTONEG_ENABLE;
7578         else
7579                 cmd->autoneg = AUTONEG_DISABLE;
7580
7581         cmd->maxtxpkt = 0;
7582         cmd->maxrxpkt = 0;
7583
7584         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7585            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7586            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7587            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7588            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7589            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7590            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7591
7592         return 0;
7593 }
7594
7595 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7596 {
7597         struct bnx2x *bp = netdev_priv(dev);
7598         u32 advertising;
7599
7600         if (IS_E1HMF(bp))
7601                 return 0;
7602
7603         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7604            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7605            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7606            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7607            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7608            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7609            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7610
7611         if (cmd->autoneg == AUTONEG_ENABLE) {
7612                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7613                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7614                         return -EINVAL;
7615                 }
7616
7617                 /* advertise the requested speed and duplex if supported */
7618                 cmd->advertising &= bp->port.supported;
7619
7620                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7621                 bp->link_params.req_duplex = DUPLEX_FULL;
7622                 bp->port.advertising |= (ADVERTISED_Autoneg |
7623                                          cmd->advertising);
7624
7625         } else { /* forced speed */
7626                 /* advertise the requested speed and duplex if supported */
7627                 switch (cmd->speed) {
7628                 case SPEED_10:
7629                         if (cmd->duplex == DUPLEX_FULL) {
7630                                 if (!(bp->port.supported &
7631                                       SUPPORTED_10baseT_Full)) {
7632                                         DP(NETIF_MSG_LINK,
7633                                            "10M full not supported\n");
7634                                         return -EINVAL;
7635                                 }
7636
7637                                 advertising = (ADVERTISED_10baseT_Full |
7638                                                ADVERTISED_TP);
7639                         } else {
7640                                 if (!(bp->port.supported &
7641                                       SUPPORTED_10baseT_Half)) {
7642                                         DP(NETIF_MSG_LINK,
7643                                            "10M half not supported\n");
7644                                         return -EINVAL;
7645                                 }
7646
7647                                 advertising = (ADVERTISED_10baseT_Half |
7648                                                ADVERTISED_TP);
7649                         }
7650                         break;
7651
7652                 case SPEED_100:
7653                         if (cmd->duplex == DUPLEX_FULL) {
7654                                 if (!(bp->port.supported &
7655                                                 SUPPORTED_100baseT_Full)) {
7656                                         DP(NETIF_MSG_LINK,
7657                                            "100M full not supported\n");
7658                                         return -EINVAL;
7659                                 }
7660
7661                                 advertising = (ADVERTISED_100baseT_Full |
7662                                                ADVERTISED_TP);
7663                         } else {
7664                                 if (!(bp->port.supported &
7665                                                 SUPPORTED_100baseT_Half)) {
7666                                         DP(NETIF_MSG_LINK,
7667                                            "100M half not supported\n");
7668                                         return -EINVAL;
7669                                 }
7670
7671                                 advertising = (ADVERTISED_100baseT_Half |
7672                                                ADVERTISED_TP);
7673                         }
7674                         break;
7675
7676                 case SPEED_1000:
7677                         if (cmd->duplex != DUPLEX_FULL) {
7678                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7679                                 return -EINVAL;
7680                         }
7681
7682                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7683                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7684                                 return -EINVAL;
7685                         }
7686
7687                         advertising = (ADVERTISED_1000baseT_Full |
7688                                        ADVERTISED_TP);
7689                         break;
7690
7691                 case SPEED_2500:
7692                         if (cmd->duplex != DUPLEX_FULL) {
7693                                 DP(NETIF_MSG_LINK,
7694                                    "2.5G half not supported\n");
7695                                 return -EINVAL;
7696                         }
7697
7698                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7699                                 DP(NETIF_MSG_LINK,
7700                                    "2.5G full not supported\n");
7701                                 return -EINVAL;
7702                         }
7703
7704                         advertising = (ADVERTISED_2500baseX_Full |
7705                                        ADVERTISED_TP);
7706                         break;
7707
7708                 case SPEED_10000:
7709                         if (cmd->duplex != DUPLEX_FULL) {
7710                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7711                                 return -EINVAL;
7712                         }
7713
7714                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7715                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7716                                 return -EINVAL;
7717                         }
7718
7719                         advertising = (ADVERTISED_10000baseT_Full |
7720                                        ADVERTISED_FIBRE);
7721                         break;
7722
7723                 default:
7724                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7725                         return -EINVAL;
7726                 }
7727
7728                 bp->link_params.req_line_speed = cmd->speed;
7729                 bp->link_params.req_duplex = cmd->duplex;
7730                 bp->port.advertising = advertising;
7731         }
7732
7733         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7734            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7735            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7736            bp->port.advertising);
7737
7738         if (netif_running(dev)) {
7739                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7740                 bnx2x_link_set(bp);
7741         }
7742
7743         return 0;
7744 }
7745
7746 #define PHY_FW_VER_LEN                  10
7747
7748 static void bnx2x_get_drvinfo(struct net_device *dev,
7749                               struct ethtool_drvinfo *info)
7750 {
7751         struct bnx2x *bp = netdev_priv(dev);
7752         char phy_fw_ver[PHY_FW_VER_LEN];
7753
7754         strcpy(info->driver, DRV_MODULE_NAME);
7755         strcpy(info->version, DRV_MODULE_VERSION);
7756
7757         phy_fw_ver[0] = '\0';
7758         if (bp->port.pmf) {
7759                 bnx2x_acquire_phy_lock(bp);
7760                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7761                                              (bp->state != BNX2X_STATE_CLOSED),
7762                                              phy_fw_ver, PHY_FW_VER_LEN);
7763                 bnx2x_release_phy_lock(bp);
7764         }
7765
7766         snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7767                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7768                  BCM_5710_FW_REVISION_VERSION,
7769                  BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7770                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7771         strcpy(info->bus_info, pci_name(bp->pdev));
7772         info->n_stats = BNX2X_NUM_STATS;
7773         info->testinfo_len = BNX2X_NUM_TESTS;
7774         info->eedump_len = bp->common.flash_size;
7775         info->regdump_len = 0;
7776 }
7777
7778 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7779 {
7780         struct bnx2x *bp = netdev_priv(dev);
7781
7782         if (bp->flags & NO_WOL_FLAG) {
7783                 wol->supported = 0;
7784                 wol->wolopts = 0;
7785         } else {
7786                 wol->supported = WAKE_MAGIC;
7787                 if (bp->wol)
7788                         wol->wolopts = WAKE_MAGIC;
7789                 else
7790                         wol->wolopts = 0;
7791         }
7792         memset(&wol->sopass, 0, sizeof(wol->sopass));
7793 }
7794
7795 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7796 {
7797         struct bnx2x *bp = netdev_priv(dev);
7798
7799         if (wol->wolopts & ~WAKE_MAGIC)
7800                 return -EINVAL;
7801
7802         if (wol->wolopts & WAKE_MAGIC) {
7803                 if (bp->flags & NO_WOL_FLAG)
7804                         return -EINVAL;
7805
7806                 bp->wol = 1;
7807         } else
7808                 bp->wol = 0;
7809
7810         return 0;
7811 }
7812
7813 static u32 bnx2x_get_msglevel(struct net_device *dev)
7814 {
7815         struct bnx2x *bp = netdev_priv(dev);
7816
7817         return bp->msglevel;
7818 }
7819
7820 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7821 {
7822         struct bnx2x *bp = netdev_priv(dev);
7823
7824         if (capable(CAP_NET_ADMIN))
7825                 bp->msglevel = level;
7826 }
7827
7828 static int bnx2x_nway_reset(struct net_device *dev)
7829 {
7830         struct bnx2x *bp = netdev_priv(dev);
7831
7832         if (!bp->port.pmf)
7833                 return 0;
7834
7835         if (netif_running(dev)) {
7836                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7837                 bnx2x_link_set(bp);
7838         }
7839
7840         return 0;
7841 }
7842
7843 static int bnx2x_get_eeprom_len(struct net_device *dev)
7844 {
7845         struct bnx2x *bp = netdev_priv(dev);
7846
7847         return bp->common.flash_size;
7848 }
7849
7850 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7851 {
7852         int port = BP_PORT(bp);
7853         int count, i;
7854         u32 val = 0;
7855
7856         /* adjust timeout for emulation/FPGA */
7857         count = NVRAM_TIMEOUT_COUNT;
7858         if (CHIP_REV_IS_SLOW(bp))
7859                 count *= 100;
7860
7861         /* request access to nvram interface */
7862         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7863                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7864
7865         for (i = 0; i < count*10; i++) {
7866                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7867                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7868                         break;
7869
7870                 udelay(5);
7871         }
7872
7873         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7874                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7875                 return -EBUSY;
7876         }
7877
7878         return 0;
7879 }
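/* The grant poll above retries count*10 times with a 5us delay, i.e.
 * roughly NVRAM_TIMEOUT_COUNT * 50us on real silicon (and 100x that on
 * emulation/FPGA) before declaring the NVRAM arbiter busy.
 */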
7880
7881 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7882 {
7883         int port = BP_PORT(bp);
7884         int count, i;
7885         u32 val = 0;
7886
7887         /* adjust timeout for emulation/FPGA */
7888         count = NVRAM_TIMEOUT_COUNT;
7889         if (CHIP_REV_IS_SLOW(bp))
7890                 count *= 100;
7891
7892         /* relinquish nvram interface */
7893         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7894                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7895
7896         for (i = 0; i < count*10; i++) {
7897                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7898                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7899                         break;
7900
7901                 udelay(5);
7902         }
7903
7904         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7905                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7906                 return -EBUSY;
7907         }
7908
7909         return 0;
7910 }
7911
7912 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7913 {
7914         u32 val;
7915
7916         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7917
7918         /* enable both bits, even on read */
7919         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7920                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7921                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7922 }
7923
7924 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7925 {
7926         u32 val;
7927
7928         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7929
7930         /* disable both bits, even after read */
7931         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7932                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7933                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7934 }
7935
7936 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7937                                   u32 cmd_flags)
7938 {
7939         int count, i, rc;
7940         u32 val;
7941
7942         /* build the command word */
7943         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7944
7945         /* need to clear DONE bit separately */
7946         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7947
7948         /* address of the NVRAM to read from */
7949         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7950                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7951
7952         /* issue a read command */
7953         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7954
7955         /* adjust timeout for emulation/FPGA */
7956         count = NVRAM_TIMEOUT_COUNT;
7957         if (CHIP_REV_IS_SLOW(bp))
7958                 count *= 100;
7959
7960         /* wait for completion */
7961         *ret_val = 0;
7962         rc = -EBUSY;
7963         for (i = 0; i < count; i++) {
7964                 udelay(5);
7965                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7966
7967                 if (val & MCPR_NVM_COMMAND_DONE) {
7968                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7969                         /* we read nvram data in cpu order
7970                          * but ethtool sees it as an array of bytes
7971                          * converting to big-endian will do the work */
7972                         val = cpu_to_be32(val);
7973                         *ret_val = val;
7974                         rc = 0;
7975                         break;
7976                 }
7977         }
7978
7979         return rc;
7980 }
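/* MCPR_NVM_COMMAND_FIRST/_LAST bracket a burst: callers set FIRST on the
 * initial dword and LAST on the final one (both at once for a single
 * access), presumably letting the NVM controller frame the range as one
 * transaction.  A minimal single-dword read sketch, assuming a 4-byte
 * aligned offset:
 *
 *      u32 val;
 *      int rc = bnx2x_nvram_read_dword(bp, offset, &val,
 *                                      MCPR_NVM_COMMAND_FIRST |
 *                                      MCPR_NVM_COMMAND_LAST);
 *
 * This is exactly what bnx2x_nvram_write1() below does for its
 * read-modify-write of a single byte.
 */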
7981
7982 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7983                             int buf_size)
7984 {
7985         int rc;
7986         u32 cmd_flags;
7987         u32 val;
7988
7989         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7990                 DP(BNX2X_MSG_NVM,
7991                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7992                    offset, buf_size);
7993                 return -EINVAL;
7994         }
7995
7996         if (offset + buf_size > bp->common.flash_size) {
7997                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7998                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7999                    offset, buf_size, bp->common.flash_size);
8000                 return -EINVAL;
8001         }
8002
8003         /* request access to nvram interface */
8004         rc = bnx2x_acquire_nvram_lock(bp);
8005         if (rc)
8006                 return rc;
8007
8008         /* enable access to nvram interface */
8009         bnx2x_enable_nvram_access(bp);
8010
8011         /* read the first word(s) */
8012         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8013         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8014                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8015                 memcpy(ret_buf, &val, 4);
8016
8017                 /* advance to the next dword */
8018                 offset += sizeof(u32);
8019                 ret_buf += sizeof(u32);
8020                 buf_size -= sizeof(u32);
8021                 cmd_flags = 0;
8022         }
8023
8024         if (rc == 0) {
8025                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8026                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8027                 memcpy(ret_buf, &val, 4);
8028         }
8029
8030         /* disable access to nvram interface */
8031         bnx2x_disable_nvram_access(bp);
8032         bnx2x_release_nvram_lock(bp);
8033
8034         return rc;
8035 }
8036
8037 static int bnx2x_get_eeprom(struct net_device *dev,
8038                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8039 {
8040         struct bnx2x *bp = netdev_priv(dev);
8041         int rc;
8042
8043         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8044            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8045            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8046            eeprom->len, eeprom->len);
8047
8048         /* parameters already validated in ethtool_get_eeprom */
8049
8050         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8051
8052         return rc;
8053 }
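/* For illustration, this path is what userspace reaches with
 * something like (device name assumed):
 *
 *	ethtool -e eth0 offset 0 length 64
 *
 * ethtool_get_eeprom() has already validated offset/len against
 * get_eeprom_len() before calling in here, hence no extra checks.
 */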
8054
8055 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8056                                    u32 cmd_flags)
8057 {
8058         int count, i, rc;
8059
8060         /* build the command word */
8061         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8062
8063         /* need to clear DONE bit separately */
8064         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8065
8066         /* write the data */
8067         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8068
8069         /* address of the NVRAM to write to */
8070         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8071                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8072
8073         /* issue the write command */
8074         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8075
8076         /* adjust timeout for emulation/FPGA */
8077         count = NVRAM_TIMEOUT_COUNT;
8078         if (CHIP_REV_IS_SLOW(bp))
8079                 count *= 100;
8080
8081         /* wait for completion */
8082         rc = -EBUSY;
8083         for (i = 0; i < count; i++) {
8084                 udelay(5);
8085                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8086                 if (val & MCPR_NVM_COMMAND_DONE) {
8087                         rc = 0;
8088                         break;
8089                 }
8090         }
8091
8092         return rc;
8093 }
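/* Worst-case wait here is NVRAM_TIMEOUT_COUNT polls of 5 us each (the
 * value of NVRAM_TIMEOUT_COUNT lives in the header and is not shown
 * here); on emulation/FPGA the budget is multiplied by 100 because
 * those platforms run the sequencer far slower.
 */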
8094
8095 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8096
8097 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8098                               int buf_size)
8099 {
8100         int rc;
8101         u32 cmd_flags;
8102         u32 align_offset;
8103         u32 val;
8104
8105         if (offset + buf_size > bp->common.flash_size) {
8106                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8107                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8108                    offset, buf_size, bp->common.flash_size);
8109                 return -EINVAL;
8110         }
8111
8112         /* request access to nvram interface */
8113         rc = bnx2x_acquire_nvram_lock(bp);
8114         if (rc)
8115                 return rc;
8116
8117         /* enable access to nvram interface */
8118         bnx2x_enable_nvram_access(bp);
8119
8120         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8121         align_offset = (offset & ~0x03);
8122         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8123
8124         if (rc == 0) {
8125                 val &= ~(0xff << BYTE_OFFSET(offset));
8126                 val |= (*data_buf << BYTE_OFFSET(offset));
8127
8128                 /* nvram data is returned as an array of bytes;
8129                  * convert it back to cpu order */
8130                 val = be32_to_cpu(val);
8131
8132                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8133                                              cmd_flags);
8134         }
8135
8136         /* disable access to nvram interface */
8137         bnx2x_disable_nvram_access(bp);
8138         bnx2x_release_nvram_lock(bp);
8139
8140         return rc;
8141 }
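/* A worked example of the read-modify-write above: writing one byte
 * at offset 0x102 gives align_offset = 0x100 and
 * BYTE_OFFSET(0x102) = 8 * 2 = 16, so the dword at 0x100 is read, the
 * lane at bits 23:16 is cleared and replaced with the new byte, and
 * the dword is converted back to cpu order and written out with
 * FIRST | LAST set.
 */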
8142
8143 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8144                              int buf_size)
8145 {
8146         int rc;
8147         u32 cmd_flags;
8148         u32 val;
8149         u32 written_so_far;
8150
8151         if (buf_size == 1)      /* ethtool */
8152                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8153
8154         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8155                 DP(BNX2X_MSG_NVM,
8156                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8157                    offset, buf_size);
8158                 return -EINVAL;
8159         }
8160
8161         if (offset + buf_size > bp->common.flash_size) {
8162                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8163                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8164                    offset, buf_size, bp->common.flash_size);
8165                 return -EINVAL;
8166         }
8167
8168         /* request access to nvram interface */
8169         rc = bnx2x_acquire_nvram_lock(bp);
8170         if (rc)
8171                 return rc;
8172
8173         /* enable access to nvram interface */
8174         bnx2x_enable_nvram_access(bp);
8175
8176         written_so_far = 0;
8177         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8178         while ((written_so_far < buf_size) && (rc == 0)) {
8179                 if (written_so_far == (buf_size - sizeof(u32)))
8180                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8181                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8182                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8183                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8184                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8185
8186                 memcpy(&val, data_buf, 4);
8187
8188                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8189
8190                 /* advance to the next dword */
8191                 offset += sizeof(u32);
8192                 data_buf += sizeof(u32);
8193                 written_so_far += sizeof(u32);
8194                 cmd_flags = 0;
8195         }
8196
8197         /* disable access to nvram interface */
8198         bnx2x_disable_nvram_access(bp);
8199         bnx2x_release_nvram_lock(bp);
8200
8201         return rc;
8202 }
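/* The flag juggling in the loop above keeps each burst inside one
 * flash page: when the next dword would cross a page boundary
 * ((offset + 4) % NVRAM_PAGE_SIZE == 0) the current dword is flagged
 * LAST, and the dword that starts a new page
 * (offset % NVRAM_PAGE_SIZE == 0) is flagged FIRST, so a large buffer
 * is written as a sequence of page-sized FIRST ... LAST bursts.
 */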
8203
8204 static int bnx2x_set_eeprom(struct net_device *dev,
8205                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8206 {
8207         struct bnx2x *bp = netdev_priv(dev);
8208         int rc;
8209
8210         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8211            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8212            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8213            eeprom->len, eeprom->len);
8214
8215         /* parameters already validated in ethtool_set_eeprom */
8216
8217         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8218         if (eeprom->magic == 0x00504859)
8219                 if (bp->port.pmf) {
8220
8221                         bnx2x_acquire_phy_lock(bp);
8222                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8223                                              bp->link_params.ext_phy_config,
8224                                              (bp->state != BNX2X_STATE_CLOSED),
8225                                              eebuf, eeprom->len);
8226                         if ((bp->state == BNX2X_STATE_OPEN) ||
8227                             (bp->state == BNX2X_STATE_DISABLED)) {
8228                                 rc |= bnx2x_link_reset(&bp->link_params,
8229                                                        &bp->link_vars);
8230                                 rc |= bnx2x_phy_init(&bp->link_params,
8231                                                      &bp->link_vars);
8232                         }
8233                         bnx2x_release_phy_lock(bp);
8234
8235                 } else /* Only the PMF can access the PHY */
8236                         return -EINVAL;
8237         else
8238                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8239
8240         return rc;
8241 }
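/* The magic 0x00504859 is simply "PHY" in ASCII (0x50 'P', 0x48 'H',
 * 0x59 'Y'): ethtool -E callers use it to route the buffer to the
 * external PHY firmware download path instead of the NVRAM write
 * path, and only the PMF (port management function) may do so.
 */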
8242
8243 static int bnx2x_get_coalesce(struct net_device *dev,
8244                               struct ethtool_coalesce *coal)
8245 {
8246         struct bnx2x *bp = netdev_priv(dev);
8247
8248         memset(coal, 0, sizeof(struct ethtool_coalesce));
8249
8250         coal->rx_coalesce_usecs = bp->rx_ticks;
8251         coal->tx_coalesce_usecs = bp->tx_ticks;
8252
8253         return 0;
8254 }
8255
8256 static int bnx2x_set_coalesce(struct net_device *dev,
8257                               struct ethtool_coalesce *coal)
8258 {
8259         struct bnx2x *bp = netdev_priv(dev);
8260
8261         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8262         if (bp->rx_ticks > 3000)
8263                 bp->rx_ticks = 3000;
8264
8265         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8266         if (bp->tx_ticks > 0x3000)
8267                 bp->tx_ticks = 0x3000;
8268
8269         if (netif_running(dev))
8270                 bnx2x_update_coalesce(bp);
8271
8272         return 0;
8273 }
8274
8275 static void bnx2x_get_ringparam(struct net_device *dev,
8276                                 struct ethtool_ringparam *ering)
8277 {
8278         struct bnx2x *bp = netdev_priv(dev);
8279
8280         ering->rx_max_pending = MAX_RX_AVAIL;
8281         ering->rx_mini_max_pending = 0;
8282         ering->rx_jumbo_max_pending = 0;
8283
8284         ering->rx_pending = bp->rx_ring_size;
8285         ering->rx_mini_pending = 0;
8286         ering->rx_jumbo_pending = 0;
8287
8288         ering->tx_max_pending = MAX_TX_AVAIL;
8289         ering->tx_pending = bp->tx_ring_size;
8290 }
8291
8292 static int bnx2x_set_ringparam(struct net_device *dev,
8293                                struct ethtool_ringparam *ering)
8294 {
8295         struct bnx2x *bp = netdev_priv(dev);
8296         int rc = 0;
8297
8298         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8299             (ering->tx_pending > MAX_TX_AVAIL) ||
8300             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8301                 return -EINVAL;
8302
8303         bp->rx_ring_size = ering->rx_pending;
8304         bp->tx_ring_size = ering->tx_pending;
8305
8306         if (netif_running(dev)) {
8307                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8308                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8309         }
8310
8311         return rc;
8312 }
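/* The lower bound on tx_pending guards against a ring too small to
 * hold a single worst-case packet: an skb may carry up to
 * MAX_SKB_FRAGS fragments, plus a few control BDs (start, parsing,
 * split header), hence the MAX_SKB_FRAGS + 4 floor.  Changing either
 * ring size reloads the NIC when the interface is up.
 */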
8313
8314 static void bnx2x_get_pauseparam(struct net_device *dev,
8315                                  struct ethtool_pauseparam *epause)
8316 {
8317         struct bnx2x *bp = netdev_priv(dev);
8318
8319         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8320                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8321
8322         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8323                             FLOW_CTRL_RX);
8324         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8325                             FLOW_CTRL_TX);
8326
8327         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8328            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8329            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8330 }
8331
8332 static int bnx2x_set_pauseparam(struct net_device *dev,
8333                                 struct ethtool_pauseparam *epause)
8334 {
8335         struct bnx2x *bp = netdev_priv(dev);
8336
8337         if (IS_E1HMF(bp))
8338                 return 0;
8339
8340         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8341            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8342            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8343
8344         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8345
8346         if (epause->rx_pause)
8347                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8348
8349         if (epause->tx_pause)
8350                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8351
8352         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8353                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8354
8355         if (epause->autoneg) {
8356                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8357                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8358                         return -EINVAL;
8359                 }
8360
8361                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8362                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8363         }
8364
8365         DP(NETIF_MSG_LINK,
8366            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8367
8368         if (netif_running(dev)) {
8369                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8370                 bnx2x_link_set(bp);
8371         }
8372
8373         return 0;
8374 }
8375
8376 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8377 {
8378         struct bnx2x *bp = netdev_priv(dev);
8379         int changed = 0;
8380         int rc = 0;
8381
8382         /* TPA requires Rx CSUM offloading */
8383         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8384                 if (!(dev->features & NETIF_F_LRO)) {
8385                         dev->features |= NETIF_F_LRO;
8386                         bp->flags |= TPA_ENABLE_FLAG;
8387                         changed = 1;
8388                 }
8389
8390         } else if (dev->features & NETIF_F_LRO) {
8391                 dev->features &= ~NETIF_F_LRO;
8392                 bp->flags &= ~TPA_ENABLE_FLAG;
8393                 changed = 1;
8394         }
8395
8396         if (changed && netif_running(dev)) {
8397                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8398                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8399         }
8400
8401         return rc;
8402 }
8403
8404 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8405 {
8406         struct bnx2x *bp = netdev_priv(dev);
8407
8408         return bp->rx_csum;
8409 }
8410
8411 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8412 {
8413         struct bnx2x *bp = netdev_priv(dev);
8414         int rc = 0;
8415
8416         bp->rx_csum = data;
8417
8418         /* Disable TPA when Rx CSUM is disabled; otherwise all
8419            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8420         if (!data) {
8421                 u32 flags = ethtool_op_get_flags(dev);
8422
8423                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8424         }
8425
8426         return rc;
8427 }
8428
8429 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8430 {
8431         if (data) {
8432                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8433                 dev->features |= NETIF_F_TSO6;
8434         } else {
8435                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8436                 dev->features &= ~NETIF_F_TSO6;
8437         }
8438
8439         return 0;
8440 }
8441
8442 static const struct {
8443         char string[ETH_GSTRING_LEN];
8444 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8445         { "register_test (offline)" },
8446         { "memory_test (offline)" },
8447         { "loopback_test (offline)" },
8448         { "nvram_test (online)" },
8449         { "interrupt_test (online)" },
8450         { "link_test (online)" },
8451         { "idle check (online)" },
8452         { "MC errors (online)" }
8453 };
8454
8455 static int bnx2x_self_test_count(struct net_device *dev)
8456 {
8457         return BNX2X_NUM_TESTS;
8458 }
8459
8460 static int bnx2x_test_registers(struct bnx2x *bp)
8461 {
8462         int idx, i, rc = -ENODEV;
8463         u32 wr_val = 0;
8464         int port = BP_PORT(bp);
8465         static const struct {
8466                 u32  offset0;
8467                 u32  offset1;
8468                 u32  mask;
8469         } reg_tbl[] = {
8470 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8471                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8472                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8473                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8474                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8475                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8476                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8477                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8478                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8479                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8480 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8481                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8482                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8483                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8484                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8485                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8486                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8487                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8488                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8489                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8490 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8491                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8492                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8493                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8494                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8495                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8496                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8497                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8498                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8499                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8500 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8501                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8502                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8503                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8504                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8505                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8506                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8507                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8508
8509                 { 0xffffffff, 0, 0x00000000 }
8510         };
8511
8512         if (!netif_running(bp->dev))
8513                 return rc;
8514
8515         /* Run the test twice:
8516            first writing 0x00000000, then writing 0xffffffff */
8517         for (idx = 0; idx < 2; idx++) {
8518
8519                 switch (idx) {
8520                 case 0:
8521                         wr_val = 0;
8522                         break;
8523                 case 1:
8524                         wr_val = 0xffffffff;
8525                         break;
8526                 }
8527
8528                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8529                         u32 offset, mask, save_val, val;
8530
8531                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8532                         mask = reg_tbl[i].mask;
8533
8534                         save_val = REG_RD(bp, offset);
8535
8536                         REG_WR(bp, offset, wr_val);
8537                         val = REG_RD(bp, offset);
8538
8539                         /* Restore the original register's value */
8540                         REG_WR(bp, offset, save_val);
8541
8542                         /* verify the value reads back as expected */
8543                         if ((val & mask) != (wr_val & mask))
8544                                 goto test_reg_exit;
8545                 }
8546         }
8547
8548         rc = 0;
8549
8550 test_reg_exit:
8551         return rc;
8552 }
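/* In reg_tbl above, offset1 is the per-port stride (the register is
 * tested at offset0 + port * offset1) and mask selects the bits that
 * are actually implemented.  For example
 * { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 } writes 0 and then
 * 0xffffffff to the port's copy of the register and only requires
 * bit 0 to read back as written.
 */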
8553
8554 static int bnx2x_test_memory(struct bnx2x *bp)
8555 {
8556         int i, j, rc = -ENODEV;
8557         u32 val;
8558         static const struct {
8559                 u32 offset;
8560                 int size;
8561         } mem_tbl[] = {
8562                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8563                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8564                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8565                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8566                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8567                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8568                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8569
8570                 { 0xffffffff, 0 }
8571         };
8572         static const struct {
8573                 char *name;
8574                 u32 offset;
8575                 u32 e1_mask;
8576                 u32 e1h_mask;
8577         } prty_tbl[] = {
8578                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8579                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8580                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8581                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8582                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8583                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8584
8585                 { NULL, 0xffffffff, 0, 0 }
8586         };
8587
8588         if (!netif_running(bp->dev))
8589                 return rc;
8590
8591         /* Go through all the memories */
8592         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8593                 for (j = 0; j < mem_tbl[i].size; j++)
8594                         REG_RD(bp, mem_tbl[i].offset + j*4);
8595
8596         /* Check the parity status */
8597         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8598                 val = REG_RD(bp, prty_tbl[i].offset);
8599                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8600                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8601                         DP(NETIF_MSG_HW,
8602                            "%s is 0x%x\n", prty_tbl[i].name, val);
8603                         goto test_mem_exit;
8604                 }
8605         }
8606
8607         rc = 0;
8608
8609 test_mem_exit:
8610         return rc;
8611 }
8612
8613 static void bnx2x_netif_start(struct bnx2x *bp)
8614 {
8615         int i;
8616
8617         if (atomic_dec_and_test(&bp->intr_sem)) {
8618                 if (netif_running(bp->dev)) {
8619                         bnx2x_int_enable(bp);
8620                         for_each_queue(bp, i)
8621                                 napi_enable(&bnx2x_fp(bp, i, napi));
8622                         if (bp->state == BNX2X_STATE_OPEN)
8623                                 netif_wake_queue(bp->dev);
8624                 }
8625         }
8626 }
8627
8628 static void bnx2x_netif_stop(struct bnx2x *bp)
8629 {
8630         int i;
8631
8632         if (netif_running(bp->dev)) {
8633                 netif_tx_disable(bp->dev);
8634                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8635                 for_each_queue(bp, i)
8636                         napi_disable(&bnx2x_fp(bp, i, napi));
8637         }
8638         bnx2x_int_disable_sync(bp);
8639 }
8640
8641 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8642 {
8643         int cnt = 1000;
8644
8645         if (link_up)
8646                 while (bnx2x_link_test(bp) && cnt--)
8647                         msleep(10);
8648 }
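/* bnx2x_link_test() returns non-zero while the link is down, so this
 * polls at 10 ms intervals and gives up after 1000 tries, i.e. the
 * link gets roughly 10 seconds to come back after a (re)init.
 */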
8649
8650 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8651 {
8652         unsigned int pkt_size, num_pkts, i;
8653         struct sk_buff *skb;
8654         unsigned char *packet;
8655         struct bnx2x_fastpath *fp = &bp->fp[0];
8656         u16 tx_start_idx, tx_idx;
8657         u16 rx_start_idx, rx_idx;
8658         u16 pkt_prod;
8659         struct sw_tx_bd *tx_buf;
8660         struct eth_tx_bd *tx_bd;
8661         dma_addr_t mapping;
8662         union eth_rx_cqe *cqe;
8663         u8 cqe_fp_flags;
8664         struct sw_rx_bd *rx_buf;
8665         u16 len;
8666         int rc = -ENODEV;
8667
8668         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8669                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8670                 bnx2x_acquire_phy_lock(bp);
8671                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8672                 bnx2x_release_phy_lock(bp);
8673
8674         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8675                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8676                 bnx2x_acquire_phy_lock(bp);
8677                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8678                 bnx2x_release_phy_lock(bp);
8679                 /* wait until link state is restored */
8680                 bnx2x_wait_for_link(bp, link_up);
8681
8682         } else
8683                 return -EINVAL;
8684
8685         pkt_size = 1514;
8686         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8687         if (!skb) {
8688                 rc = -ENOMEM;
8689                 goto test_loopback_exit;
8690         }
8691         packet = skb_put(skb, pkt_size);
8692         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8693         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8694         for (i = ETH_HLEN; i < pkt_size; i++)
8695                 packet[i] = (unsigned char) (i & 0xff);
8696
8697         num_pkts = 0;
8698         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8699         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8700
8701         pkt_prod = fp->tx_pkt_prod++;
8702         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8703         tx_buf->first_bd = fp->tx_bd_prod;
8704         tx_buf->skb = skb;
8705
8706         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8707         mapping = pci_map_single(bp->pdev, skb->data,
8708                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8709         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8710         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8711         tx_bd->nbd = cpu_to_le16(1);
8712         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8713         tx_bd->vlan = cpu_to_le16(pkt_prod);
8714         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8715                                        ETH_TX_BD_FLAGS_END_BD);
8716         tx_bd->general_data = ((UNICAST_ADDRESS <<
8717                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8718
8719         fp->hw_tx_prods->bds_prod =
8720                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8721         mb(); /* FW restriction: must not reorder writing nbd and packets */
8722         fp->hw_tx_prods->packets_prod =
8723                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8724         DOORBELL(bp, FP_IDX(fp), 0);
8725
8726         mmiowb();
8727
8728         num_pkts++;
8729         fp->tx_bd_prod++;
8730         bp->dev->trans_start = jiffies;
8731
8732         udelay(100);
8733
8734         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8735         if (tx_idx != tx_start_idx + num_pkts)
8736                 goto test_loopback_exit;
8737
8738         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8739         if (rx_idx != rx_start_idx + num_pkts)
8740                 goto test_loopback_exit;
8741
8742         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8743         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8744         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8745                 goto test_loopback_rx_exit;
8746
8747         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8748         if (len != pkt_size)
8749                 goto test_loopback_rx_exit;
8750
8751         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8752         skb = rx_buf->skb;
8753         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8754         for (i = ETH_HLEN; i < pkt_size; i++)
8755                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8756                         goto test_loopback_rx_exit;
8757
8758         rc = 0;
8759
8760 test_loopback_rx_exit:
8761         bp->dev->last_rx = jiffies;
8762
8763         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8764         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8765         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8766         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8767
8768         /* Update producers */
8769         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8770                              fp->rx_sge_prod);
8771         mmiowb(); /* keep prod updates ordered */
8772
8773 test_loopback_exit:
8774         bp->link_params.loopback_mode = LOOPBACK_NONE;
8775
8776         return rc;
8777 }
8778
8779 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8780 {
8781         int rc = 0;
8782
8783         if (!netif_running(bp->dev))
8784                 return BNX2X_LOOPBACK_FAILED;
8785
8786         bnx2x_netif_stop(bp);
8787
8788         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8789                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8790                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8791         }
8792
8793         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8794                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8795                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8796         }
8797
8798         bnx2x_netif_start(bp);
8799
8800         return rc;
8801 }
8802
8803 #define CRC32_RESIDUAL                  0xdebb20e3
8804
8805 static int bnx2x_test_nvram(struct bnx2x *bp)
8806 {
8807         static const struct {
8808                 int offset;
8809                 int size;
8810         } nvram_tbl[] = {
8811                 {     0,  0x14 }, /* bootstrap */
8812                 {  0x14,  0xec }, /* dir */
8813                 { 0x100, 0x350 }, /* manuf_info */
8814                 { 0x450,  0xf0 }, /* feature_info */
8815                 { 0x640,  0x64 }, /* upgrade_key_info */
8816                 { 0x6a4,  0x64 },
8817                 { 0x708,  0x70 }, /* manuf_key_info */
8818                 { 0x778,  0x70 },
8819                 {     0,     0 }
8820         };
8821         u32 buf[0x350 / 4];
8822         u8 *data = (u8 *)buf;
8823         int i, rc;
8824         u32 magic, csum;
8825
8826         rc = bnx2x_nvram_read(bp, 0, data, 4);
8827         if (rc) {
8828                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8829                 goto test_nvram_exit;
8830         }
8831
8832         magic = be32_to_cpu(buf[0]);
8833         if (magic != 0x669955aa) {
8834                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8835                 rc = -ENODEV;
8836                 goto test_nvram_exit;
8837         }
8838
8839         for (i = 0; nvram_tbl[i].size; i++) {
8840
8841                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8842                                       nvram_tbl[i].size);
8843                 if (rc) {
8844                         DP(NETIF_MSG_PROBE,
8845                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8846                         goto test_nvram_exit;
8847                 }
8848
8849                 csum = ether_crc_le(nvram_tbl[i].size, data);
8850                 if (csum != CRC32_RESIDUAL) {
8851                         DP(NETIF_MSG_PROBE,
8852                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8853                         rc = -ENODEV;
8854                         goto test_nvram_exit;
8855                 }
8856         }
8857
8858 test_nvram_exit:
8859         return rc;
8860 }
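/* The checksum test uses a classic CRC-32 property: when a block ends
 * with the CRC-32 of the bytes before it (in the byte order
 * ether_crc_le() expects), the CRC computed over the whole block,
 * trailer included, collapses to a fixed residual, 0xdebb20e3,
 * whatever the block contents.  Each nvram_tbl region is stored that
 * way, so one compare against CRC32_RESIDUAL validates a region.
 */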
8861
8862 static int bnx2x_test_intr(struct bnx2x *bp)
8863 {
8864         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8865         int i, rc;
8866
8867         if (!netif_running(bp->dev))
8868                 return -ENODEV;
8869
8870         config->hdr.length_6b = 0;
8871         config->hdr.offset = 0;
8872         config->hdr.client_id = BP_CL_ID(bp);
8873         config->hdr.reserved1 = 0;
8874
8875         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8876                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8877                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8878         if (rc == 0) {
8879                 bp->set_mac_pending++;
8880                 for (i = 0; i < 10; i++) {
8881                         if (!bp->set_mac_pending)
8882                                 break;
8883                         msleep_interruptible(10);
8884                 }
8885                 if (i == 10)
8886                         rc = -ENODEV;
8887         }
8888
8889         return rc;
8890 }
8891
8892 static void bnx2x_self_test(struct net_device *dev,
8893                             struct ethtool_test *etest, u64 *buf)
8894 {
8895         struct bnx2x *bp = netdev_priv(dev);
8896
8897         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8898
8899         if (!netif_running(dev))
8900                 return;
8901
8902         /* offline tests are not supported in MF mode */
8903         if (IS_E1HMF(bp))
8904                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8905
8906         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8907                 u8 link_up;
8908
8909                 link_up = bp->link_vars.link_up;
8910                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8911                 bnx2x_nic_load(bp, LOAD_DIAG);
8912                 /* wait until link state is restored */
8913                 bnx2x_wait_for_link(bp, link_up);
8914
8915                 if (bnx2x_test_registers(bp) != 0) {
8916                         buf[0] = 1;
8917                         etest->flags |= ETH_TEST_FL_FAILED;
8918                 }
8919                 if (bnx2x_test_memory(bp) != 0) {
8920                         buf[1] = 1;
8921                         etest->flags |= ETH_TEST_FL_FAILED;
8922                 }
8923                 buf[2] = bnx2x_test_loopback(bp, link_up);
8924                 if (buf[2] != 0)
8925                         etest->flags |= ETH_TEST_FL_FAILED;
8926
8927                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8928                 bnx2x_nic_load(bp, LOAD_NORMAL);
8929                 /* wait until link state is restored */
8930                 bnx2x_wait_for_link(bp, link_up);
8931         }
8932         if (bnx2x_test_nvram(bp) != 0) {
8933                 buf[3] = 1;
8934                 etest->flags |= ETH_TEST_FL_FAILED;
8935         }
8936         if (bnx2x_test_intr(bp) != 0) {
8937                 buf[4] = 1;
8938                 etest->flags |= ETH_TEST_FL_FAILED;
8939         }
8940         if (bp->port.pmf)
8941                 if (bnx2x_link_test(bp) != 0) {
8942                         buf[5] = 1;
8943                         etest->flags |= ETH_TEST_FL_FAILED;
8944                 }
8945         buf[7] = bnx2x_mc_assert(bp);
8946         if (buf[7] != 0)
8947                 etest->flags |= ETH_TEST_FL_FAILED;
8948
8949 #ifdef BNX2X_EXTRA_DEBUG
8950         bnx2x_panic_dump(bp);
8951 #endif
8952 }
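/* Result slots map onto bnx2x_tests_str_arr: buf[0] registers, buf[1]
 * memory, buf[2] loopback bitmask, buf[3] nvram, buf[4] interrupt,
 * buf[5] link and buf[7] MC assert count; slot 6 (idle check) is not
 * filled in by this function.
 */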
8953
8954 static const struct {
8955         long offset;
8956         int size;
8957         u32 flags;
8958 #define STATS_FLAGS_PORT                1
8959 #define STATS_FLAGS_FUNC                2
8960         u8 string[ETH_GSTRING_LEN];
8961 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8962 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8963                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8964         { STATS_OFFSET32(error_bytes_received_hi),
8965                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8966         { STATS_OFFSET32(total_bytes_transmitted_hi),
8967                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8968         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8969                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8970         { STATS_OFFSET32(total_unicast_packets_received_hi),
8971                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8972         { STATS_OFFSET32(total_multicast_packets_received_hi),
8973                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8974         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8975                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8976         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8977                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8978         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8979                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8980 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8981                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8982         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8983                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8984         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8985                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8986         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8987                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8988         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8989                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8990         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8991                                 8, STATS_FLAGS_PORT, "tx_deferred" },
8992         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8993                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8994         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8995                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8996         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8997                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8998         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8999                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9000 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9001                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9002         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9003                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9004         { STATS_OFFSET32(jabber_packets_received),
9005                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9006         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9007                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9008         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9009                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9010         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9011                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9012         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9013                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9014         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9015                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9016         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9017                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9018         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9019                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9020 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9021                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9022         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9023                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9024         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9025                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9026         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9027                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9028         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9029                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9030         { STATS_OFFSET32(mac_filter_discard),
9031                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9032         { STATS_OFFSET32(no_buff_discard),
9033                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9034         { STATS_OFFSET32(xxoverflow_discard),
9035                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9036         { STATS_OFFSET32(brb_drop_hi),
9037                                 8, STATS_FLAGS_PORT, "brb_discard" },
9038         { STATS_OFFSET32(brb_truncate_hi),
9039                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9040 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9041                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9042         { STATS_OFFSET32(rx_skb_alloc_failed),
9043                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9044 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9045                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9046 };
9047
9048 #define IS_NOT_E1HMF_STAT(bp, i) \
9049                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9050
9051 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9052 {
9053         struct bnx2x *bp = netdev_priv(dev);
9054         int i, j;
9055
9056         switch (stringset) {
9057         case ETH_SS_STATS:
9058                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9059                         if (IS_NOT_E1HMF_STAT(bp, i))
9060                                 continue;
9061                         strcpy(buf + j*ETH_GSTRING_LEN,
9062                                bnx2x_stats_arr[i].string);
9063                         j++;
9064                 }
9065                 break;
9066
9067         case ETH_SS_TEST:
9068                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9069                 break;
9070         }
9071 }
9072
9073 static int bnx2x_get_stats_count(struct net_device *dev)
9074 {
9075         struct bnx2x *bp = netdev_priv(dev);
9076         int i, num_stats = 0;
9077
9078         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9079                 if (IS_NOT_E1HMF_STAT(bp, i))
9080                         continue;
9081                 num_stats++;
9082         }
9083         return num_stats;
9084 }
9085
9086 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9087                                     struct ethtool_stats *stats, u64 *buf)
9088 {
9089         struct bnx2x *bp = netdev_priv(dev);
9090         u32 *hw_stats = (u32 *)&bp->eth_stats;
9091         int i, j;
9092
9093         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9094                 if (IS_NOT_E1HMF_STAT(bp, i))
9095                         continue;
9096
9097                 if (bnx2x_stats_arr[i].size == 0) {
9098                         /* skip this counter */
9099                         buf[j] = 0;
9100                         j++;
9101                         continue;
9102                 }
9103                 if (bnx2x_stats_arr[i].size == 4) {
9104                         /* 4-byte counter */
9105                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9106                         j++;
9107                         continue;
9108                 }
9109                 /* 8-byte counter */
9110                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9111                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9112                 j++;
9113         }
9114 }
9115
9116 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9117 {
9118         struct bnx2x *bp = netdev_priv(dev);
9119         int port = BP_PORT(bp);
9120         int i;
9121
9122         if (!netif_running(dev))
9123                 return 0;
9124
9125         if (!bp->port.pmf)
9126                 return 0;
9127
9128         if (data == 0)
9129                 data = 2;
9130
9131         for (i = 0; i < (data * 2); i++) {
9132                 if ((i % 2) == 0)
9133                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9134                                       bp->link_params.hw_led_mode,
9135                                       bp->link_params.chip_id);
9136                 else
9137                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9138                                       bp->link_params.hw_led_mode,
9139                                       bp->link_params.chip_id);
9140
9141                 msleep_interruptible(500);
9142                 if (signal_pending(current))
9143                         break;
9144         }
9145
9146         if (bp->link_vars.link_up)
9147                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9148                               bp->link_vars.line_speed,
9149                               bp->link_params.hw_led_mode,
9150                               bp->link_params.chip_id);
9151
9152         return 0;
9153 }
9154
9155 static struct ethtool_ops bnx2x_ethtool_ops = {
9156         .get_settings           = bnx2x_get_settings,
9157         .set_settings           = bnx2x_set_settings,
9158         .get_drvinfo            = bnx2x_get_drvinfo,
9159         .get_wol                = bnx2x_get_wol,
9160         .set_wol                = bnx2x_set_wol,
9161         .get_msglevel           = bnx2x_get_msglevel,
9162         .set_msglevel           = bnx2x_set_msglevel,
9163         .nway_reset             = bnx2x_nway_reset,
9164         .get_link               = ethtool_op_get_link,
9165         .get_eeprom_len         = bnx2x_get_eeprom_len,
9166         .get_eeprom             = bnx2x_get_eeprom,
9167         .set_eeprom             = bnx2x_set_eeprom,
9168         .get_coalesce           = bnx2x_get_coalesce,
9169         .set_coalesce           = bnx2x_set_coalesce,
9170         .get_ringparam          = bnx2x_get_ringparam,
9171         .set_ringparam          = bnx2x_set_ringparam,
9172         .get_pauseparam         = bnx2x_get_pauseparam,
9173         .set_pauseparam         = bnx2x_set_pauseparam,
9174         .get_rx_csum            = bnx2x_get_rx_csum,
9175         .set_rx_csum            = bnx2x_set_rx_csum,
9176         .get_tx_csum            = ethtool_op_get_tx_csum,
9177         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9178         .set_flags              = bnx2x_set_flags,
9179         .get_flags              = ethtool_op_get_flags,
9180         .get_sg                 = ethtool_op_get_sg,
9181         .set_sg                 = ethtool_op_set_sg,
9182         .get_tso                = ethtool_op_get_tso,
9183         .set_tso                = bnx2x_set_tso,
9184         .self_test_count        = bnx2x_self_test_count,
9185         .self_test              = bnx2x_self_test,
9186         .get_strings            = bnx2x_get_strings,
9187         .phys_id                = bnx2x_phys_id,
9188         .get_stats_count        = bnx2x_get_stats_count,
9189         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9190 };
9191
9192 /* end of ethtool_ops */
9193
9194 /****************************************************************************
9195 * General service functions
9196 ****************************************************************************/
9197
9198 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9199 {
9200         u16 pmcsr;
9201
9202         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9203
9204         switch (state) {
9205         case PCI_D0:
9206                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9207                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9208                                        PCI_PM_CTRL_PME_STATUS));
9209
9210                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9211                 /* delay required during transition out of D3hot */
9212                         msleep(20);
9213                 break;
9214
9215         case PCI_D3hot:
9216                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9217                 pmcsr |= 3;
9218
9219                 if (bp->wol)
9220                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9221
9222                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9223                                       pmcsr);
9224
9225                 /* No more memory access after this point until
9226                  * device is brought back to D0.
9227                  */
9228                 break;
9229
9230         default:
9231                 return -EINVAL;
9232         }
9233         return 0;
9234 }
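/* "pmcsr |= 3" sets the PowerState field of the PM control/status
 * register to 0b11, which the PCI PM spec defines as D3hot; PME
 * generation is armed on top of that only when WoL is enabled.
 */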
9235
9236 /*
9237  * net_device service functions
9238  */
9239
9240 static int bnx2x_poll(struct napi_struct *napi, int budget)
9241 {
9242         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9243                                                  napi);
9244         struct bnx2x *bp = fp->bp;
9245         int work_done = 0;
9246
9247 #ifdef BNX2X_STOP_ON_ERROR
9248         if (unlikely(bp->panic))
9249                 goto poll_panic;
9250 #endif
9251
9252         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9253         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9254         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9255
9256         bnx2x_update_fpsb_idx(fp);
9257
9258         if (BNX2X_HAS_TX_WORK(fp))
9259                 bnx2x_tx_int(fp, budget);
9260
9261         if (BNX2X_HAS_RX_WORK(fp))
9262                 work_done = bnx2x_rx_int(fp, budget);
9263
9264         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9265
9266         /* must not complete if we consumed full budget */
9267         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9268
9269 #ifdef BNX2X_STOP_ON_ERROR
9270 poll_panic:
9271 #endif
9272                 netif_rx_complete(bp->dev, napi);
9273
9274                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9275                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9276                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9277                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9278         }
9279         return work_done;
9280 }
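/* NAPI contract notes for the poll routine above: the rmb() orders
 * the status-block read in BNX2X_HAS_WORK() against the work just
 * consumed, and completion (netif_rx_complete() plus the two IGU acks
 * that re-enable the interrupt) is skipped whenever the full budget
 * was spent, so the core keeps polling instead.
 */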
9281
9282
9283 /* We split the first BD into a headers BD and a data BD
9284  * to ease the pain of our fellow microcode engineers;
9285  * we use one mapping for both BDs.
9286  * So far this has only been observed to happen
9287  * in Other Operating Systems(TM)
9288  */
9289 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9290                                    struct bnx2x_fastpath *fp,
9291                                    struct eth_tx_bd **tx_bd, u16 hlen,
9292                                    u16 bd_prod, int nbd)
9293 {
9294         struct eth_tx_bd *h_tx_bd = *tx_bd;
9295         struct eth_tx_bd *d_tx_bd;
9296         dma_addr_t mapping;
9297         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9298
9299         /* first fix first BD */
9300         h_tx_bd->nbd = cpu_to_le16(nbd);
9301         h_tx_bd->nbytes = cpu_to_le16(hlen);
9302
9303         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9304            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9305            h_tx_bd->addr_lo, h_tx_bd->nbd);
9306
9307         /* now get a new data BD
9308          * (after the pbd) and fill it */
9309         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9310         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9311
9312         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9313                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9314
9315         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9316         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9317         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9318         d_tx_bd->vlan = 0;
9319         /* this marks the BD as one that has no individual mapping;
9320          * the FW ignores this flag in a BD not marked start
9321          */
9322         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9323         DP(NETIF_MSG_TX_QUEUED,
9324            "TSO split data size is %d (%x:%x)\n",
9325            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9326
9327         /* update tx_bd for marking the last BD flag */
9328         *tx_bd = d_tx_bd;
9329
9330         return bd_prod;
9331 }
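/* Example of the split, with made-up sizes: for a 1400-byte first BD
 * whose headers are hlen = 66 bytes, the header BD keeps the original
 * address with nbytes = 66, and the new data BD points at
 * mapping + 66 with nbytes = 1334.  Both BDs share one DMA mapping;
 * ETH_TX_BD_FLAGS_SW_LSO marks the data BD as having no mapping of
 * its own.
 */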
9332
9333 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9334 {
9335         if (fix > 0)
9336                 csum = (u16) ~csum_fold(csum_sub(csum,
9337                                 csum_partial(t_header - fix, fix, 0)));
9338
9339         else if (fix < 0)
9340                 csum = (u16) ~csum_fold(csum_add(csum,
9341                                 csum_partial(t_header, -fix, 0)));
9342
9343         return swab16(csum);
9344 }
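/* The fix-up above compensates for a pseudo checksum computed from
 * the wrong starting offset: for fix > 0 the extra fix bytes before
 * the transport header are subtracted out,
 *
 *	csum' = ~csum_fold(csum - csum_partial(t_header - fix, fix, 0))
 *
 * for fix < 0 the missing bytes are added back in, and swab16()
 * byte-swaps the folded result for the consumer (presumably the
 * parsing BD).
 */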
9345
9346 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9347 {
9348         u32 rc;
9349
9350         if (skb->ip_summed != CHECKSUM_PARTIAL)
9351                 rc = XMIT_PLAIN;
9352
9353         else {
9354                 if (skb->protocol == htons(ETH_P_IPV6)) {
9355                         rc = XMIT_CSUM_V6;
9356                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9357                                 rc |= XMIT_CSUM_TCP;
9358
9359                 } else {
9360                         rc = XMIT_CSUM_V4;
9361                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9362                                 rc |= XMIT_CSUM_TCP;
9363                 }
9364         }
9365
9366         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9367                 rc |= XMIT_GSO_V4;
9368
9369         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9370                 rc |= XMIT_GSO_V6;
9371
9372         return rc;
9373 }
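/* The returned mask composes checksum and GSO bits; e.g. a TSO'd
 * IPv4/TCP skb with CHECKSUM_PARTIAL yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a packet with no
 * offloads at all is plain XMIT_PLAIN.
 */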
9374
9375 /* check if packet requires linearization (packet is too fragmented) */
9376 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9377                              u32 xmit_type)
9378 {
9379         int to_copy = 0;
9380         int hlen = 0;
9381         int first_bd_sz = 0;
9382
9383         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9384         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9385
9386                 if (xmit_type & XMIT_GSO) {
9387                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9388                         /* Check if LSO packet needs to be copied:
9389                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9390                         int wnd_size = MAX_FETCH_BD - 3;
9391                         /* Number of windows to check */
9392                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9393                         int wnd_idx = 0;
9394                         int frag_idx = 0;
9395                         u32 wnd_sum = 0;
9396
9397                         /* Headers length */
9398                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9399                                 tcp_hdrlen(skb);
9400
9401                         /* Amount of data (w/o headers) on linear part of SKB */
9402                         first_bd_sz = skb_headlen(skb) - hlen;
9403
9404                         wnd_sum  = first_bd_sz;
9405
9406                         /* Calculate the first sum - it's special */
9407                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9408                                 wnd_sum +=
9409                                         skb_shinfo(skb)->frags[frag_idx].size;
9410
9411                         /* If there was data on linear skb data - check it */
9412                         if (first_bd_sz > 0) {
9413                                 if (unlikely(wnd_sum < lso_mss)) {
9414                                         to_copy = 1;
9415                                         goto exit_lbl;
9416                                 }
9417
9418                                 wnd_sum -= first_bd_sz;
9419                         }
9420
9421                         /* Others are easier: run through the frag list and
9422                            check all windows */
9423                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9424                                 wnd_sum +=
9425                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9426
9427                                 if (unlikely(wnd_sum < lso_mss)) {
9428                                         to_copy = 1;
9429                                         break;
9430                                 }
9431                                 wnd_sum -=
9432                                         skb_shinfo(skb)->frags[wnd_idx].size;
9433                         }
9434
9435                 } else {
9436                         /* a non-LSO packet this fragmented must always
9437                            be linearized */
9438                         to_copy = 1;
9439                 }
9440         }
9441
9442 exit_lbl:
9443         if (unlikely(to_copy))
9444                 DP(NETIF_MSG_TX_QUEUED,
9445                    "Linearization IS REQUIRED for %s packet. "
9446                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9447                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9448                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9449
9450         return to_copy;
9451 }
9452
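/* Editorial sketch (not in the original source): the sliding-window
 * test from bnx2x_pkt_req_lin() in isolation, over a plain array of
 * fragment sizes.  Every window of wnd_size consecutive BDs must carry
 * at least one full MSS, otherwise the FW cannot fetch the packet and
 * it has to be linearized.  All names here are hypothetical.
 */
static int window_needs_linearization(const unsigned int *frag_sz,
                                      int nfrags, int wnd_size,
                                      unsigned int mss)
{
        unsigned int wnd_sum = 0;
        int i;

        if (nfrags < wnd_size)
                return 0;       /* the whole packet fits in one window */

        for (i = 0; i < wnd_size; i++)
                wnd_sum += frag_sz[i];

        for (i = 0; ; i++) {
                if (wnd_sum < mss)
                        return 1;       /* window too small - linearize */
                if (i + wnd_size >= nfrags)
                        return 0;
                /* slide: drop the oldest frag, add the next one */
                wnd_sum += frag_sz[i + wnd_size] - frag_sz[i];
        }
}
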
9453 /* called with netif_tx_lock
9454  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9455  * netif_wake_queue()
9456  */
9457 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9458 {
9459         struct bnx2x *bp = netdev_priv(dev);
9460         struct bnx2x_fastpath *fp;
9461         struct sw_tx_bd *tx_buf;
9462         struct eth_tx_bd *tx_bd;
9463         struct eth_tx_parse_bd *pbd = NULL;
9464         u16 pkt_prod, bd_prod;
9465         int nbd, fp_index;
9466         dma_addr_t mapping;
9467         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9468         int vlan_off = (bp->e1hov ? 4 : 0);
9469         int i;
9470         u8 hlen = 0;
9471
9472 #ifdef BNX2X_STOP_ON_ERROR
9473         if (unlikely(bp->panic))
9474                 return NETDEV_TX_BUSY;
9475 #endif
9476
9477         fp_index = (smp_processor_id() % bp->num_queues);
9478         fp = &bp->fp[fp_index];
9479
9480         if (unlikely(bnx2x_tx_avail(fp) <
9481                                         (skb_shinfo(skb)->nr_frags + 3))) {
9482                 bp->eth_stats.driver_xoff++;
9483                 netif_stop_queue(dev);
9484                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9485                 return NETDEV_TX_BUSY;
9486         }
9487
9488         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9489            "  gso type %x  xmit_type %x\n",
9490            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9491            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9492
9493         /* First, check if we need to linearize the skb
9494            (due to FW restrictions) */
9495         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9496                 /* Statistics of linearization */
9497                 bp->lin_cnt++;
9498                 if (skb_linearize(skb) != 0) {
9499                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9500                            "silently dropping this SKB\n");
9501                         dev_kfree_skb_any(skb);
9502                         return NETDEV_TX_OK;
9503                 }
9504         }
9505
9506         /*
9507         Please read carefully. First we use one BD which we mark as start,
9508         then for TSO or xsum we have a parsing info BD,
9509         and only then we have the rest of the TSO BDs.
9510         (don't forget to mark the last one as last,
9511         and to unmap only AFTER you write to the BD ...)
9512         And above all, all pbd sizes are in words - NOT DWORDS!
9513         */
9514
9515         pkt_prod = fp->tx_pkt_prod++;
9516         bd_prod = TX_BD(fp->tx_bd_prod);
9517
9518         /* get a tx_buf and first BD */
9519         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9520         tx_bd = &fp->tx_desc_ring[bd_prod];
9521
9522         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9523         tx_bd->general_data = (UNICAST_ADDRESS <<
9524                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9525         tx_bd->general_data |= 1; /* header nbd */
9526
9527         /* remember the first BD of the packet */
9528         tx_buf->first_bd = fp->tx_bd_prod;
9529         tx_buf->skb = skb;
9530
9531         DP(NETIF_MSG_TX_QUEUED,
9532            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9533            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9534
9535         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9536                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9537                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9538                 vlan_off += 4;
9539         } else
9540                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9541
9542         if (xmit_type) {
9543
9544                 /* turn on parsing and get a BD */
9545                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9546                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9547
9548                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9549         }
9550
9551         if (xmit_type & XMIT_CSUM) {
9552                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9553
9554                 /* for now NS flag is not used in Linux */
9555                 pbd->global_data = (hlen |
9556                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9557                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9558
9559                 pbd->ip_hlen = (skb_transport_header(skb) -
9560                                 skb_network_header(skb)) / 2;
9561
9562                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9563
9564                 pbd->total_hlen = cpu_to_le16(hlen);
9565                 hlen = hlen*2 - vlan_off;
9566
9567                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9568
9569                 if (xmit_type & XMIT_CSUM_V4)
9570                         tx_bd->bd_flags.as_bitfield |=
9571                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9572                 else
9573                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9574
9575                 if (xmit_type & XMIT_CSUM_TCP) {
9576                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9577
9578                 } else {
9579                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9580
9581                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9582                         pbd->cs_offset = fix / 2;
9583
9584                         DP(NETIF_MSG_TX_QUEUED,
9585                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9586                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9587                            SKB_CS(skb));
9588
9589                         /* HW bug: fixup the CSUM */
9590                         pbd->tcp_pseudo_csum =
9591                                 bnx2x_csum_fix(skb_transport_header(skb),
9592                                                SKB_CS(skb), fix);
9593
9594                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9595                            pbd->tcp_pseudo_csum);
9596                 }
9597         }
9598
9599         mapping = pci_map_single(bp->pdev, skb->data,
9600                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9601
9602         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9603         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9604         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9605         tx_bd->nbd = cpu_to_le16(nbd);
9606         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9607
9608         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9609            "  nbytes %d  flags %x  vlan %x\n",
9610            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9611            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9612            le16_to_cpu(tx_bd->vlan));
9613
9614         if (xmit_type & XMIT_GSO) {
9615
9616                 DP(NETIF_MSG_TX_QUEUED,
9617                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9618                    skb->len, hlen, skb_headlen(skb),
9619                    skb_shinfo(skb)->gso_size);
9620
9621                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9622
9623                 if (unlikely(skb_headlen(skb) > hlen))
9624                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9625                                                  bd_prod, ++nbd);
9626
9627                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9628                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9629                 pbd->tcp_flags = pbd_tcp_flags(skb);
9630
9631                 if (xmit_type & XMIT_GSO_V4) {
9632                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9633                         pbd->tcp_pseudo_csum =
9634                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9635                                                           ip_hdr(skb)->daddr,
9636                                                           0, IPPROTO_TCP, 0));
9637
9638                 } else
9639                         pbd->tcp_pseudo_csum =
9640                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9641                                                         &ipv6_hdr(skb)->daddr,
9642                                                         0, IPPROTO_TCP, 0));
9643
9644                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9645         }
9646
9647         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9648                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9649
9650                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9651                 tx_bd = &fp->tx_desc_ring[bd_prod];
9652
9653                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9654                                        frag->size, PCI_DMA_TODEVICE);
9655
9656                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9657                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9658                 tx_bd->nbytes = cpu_to_le16(frag->size);
9659                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9660                 tx_bd->bd_flags.as_bitfield = 0;
9661
9662                 DP(NETIF_MSG_TX_QUEUED,
9663                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9664                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9665                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9666         }
9667
9668         /* now at last mark the BD as the last BD */
9669         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9670
9671         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9672            tx_bd, tx_bd->bd_flags.as_bitfield);
9673
9674         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9675
9676         /* now send a tx doorbell, counting the next-page BD
9677          * if the packet crosses or ends on a page boundary
9678          */
9679         if (TX_BD_POFF(bd_prod) < nbd)
9680                 nbd++;
9681
9682         if (pbd)
9683                 DP(NETIF_MSG_TX_QUEUED,
9684                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9685                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9686                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9687                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9688                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9689
9690         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9691
9692         fp->hw_tx_prods->bds_prod =
9693                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9694         mb(); /* FW restriction: must not reorder writing nbd and packets */
9695         fp->hw_tx_prods->packets_prod =
9696                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9697         DOORBELL(bp, FP_IDX(fp), 0);
9698
9699         mmiowb();
9700
9701         fp->tx_bd_prod += nbd;
9702         dev->trans_start = jiffies;
9703
9704         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9705                 netif_stop_queue(dev);
9706                 bp->eth_stats.driver_xoff++;
9707                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9708                         netif_wake_queue(dev);
9709         }
9710         fp->tx_pkt++;
9711
9712         return NETDEV_TX_OK;
9713 }
9714
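/* Editorial sketch (not in the original source): the flow-control tail
 * of bnx2x_start_xmit() as a standalone pattern.  MAX_SKB_FRAGS + 3 is
 * the worst case per the comment in bnx2x_pkt_req_lin(): every frag
 * plus the linear-data, parse and last BDs.  The helper name is
 * hypothetical; bnx2x_tx_avail() is the driver's own.
 */
static void bnx2x_tx_flow_ctrl(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct net_device *dev)
{
        if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
                netif_stop_queue(dev);
                bp->eth_stats.driver_xoff++;
                /* bnx2x_tx_int() may have freed BDs between the check
                 * and the stop; re-check and undo the stop if so */
                if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
                        netif_wake_queue(dev);
        }
}
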
9715 /* called with rtnl_lock */
9716 static int bnx2x_open(struct net_device *dev)
9717 {
9718         struct bnx2x *bp = netdev_priv(dev);
9719
9720         bnx2x_set_power_state(bp, PCI_D0);
9721
9722         return bnx2x_nic_load(bp, LOAD_OPEN);
9723 }
9724
9725 /* called with rtnl_lock */
9726 static int bnx2x_close(struct net_device *dev)
9727 {
9728         struct bnx2x *bp = netdev_priv(dev);
9729
9730         /* Unload the driver, release IRQs */
9731         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9732         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9733                 if (!CHIP_REV_IS_SLOW(bp))
9734                         bnx2x_set_power_state(bp, PCI_D3hot);
9735
9736         return 0;
9737 }
9738
9739 /* called with netif_tx_lock from set_multicast */
9740 static void bnx2x_set_rx_mode(struct net_device *dev)
9741 {
9742         struct bnx2x *bp = netdev_priv(dev);
9743         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9744         int port = BP_PORT(bp);
9745
9746         if (bp->state != BNX2X_STATE_OPEN) {
9747                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9748                 return;
9749         }
9750
9751         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9752
9753         if (dev->flags & IFF_PROMISC)
9754                 rx_mode = BNX2X_RX_MODE_PROMISC;
9755
9756         else if ((dev->flags & IFF_ALLMULTI) ||
9757                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9758                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9759
9760         else { /* some multicasts */
9761                 if (CHIP_IS_E1(bp)) {
9762                         int i, old, offset;
9763                         struct dev_mc_list *mclist;
9764                         struct mac_configuration_cmd *config =
9765                                                 bnx2x_sp(bp, mcast_config);
9766
9767                         for (i = 0, mclist = dev->mc_list;
9768                              mclist && (i < dev->mc_count);
9769                              i++, mclist = mclist->next) {
9770
9771                                 config->config_table[i].
9772                                         cam_entry.msb_mac_addr =
9773                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9774                                 config->config_table[i].
9775                                         cam_entry.middle_mac_addr =
9776                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9777                                 config->config_table[i].
9778                                         cam_entry.lsb_mac_addr =
9779                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9780                                 config->config_table[i].cam_entry.flags =
9781                                                         cpu_to_le16(port);
9782                                 config->config_table[i].
9783                                         target_table_entry.flags = 0;
9784                                 config->config_table[i].
9785                                         target_table_entry.client_id = 0;
9786                                 config->config_table[i].
9787                                         target_table_entry.vlan_id = 0;
9788
9789                                 DP(NETIF_MSG_IFUP,
9790                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9791                                    config->config_table[i].
9792                                                 cam_entry.msb_mac_addr,
9793                                    config->config_table[i].
9794                                                 cam_entry.middle_mac_addr,
9795                                    config->config_table[i].
9796                                                 cam_entry.lsb_mac_addr);
9797                         }
9798                         old = config->hdr.length_6b;
9799                         if (old > i) {
9800                                 for (; i < old; i++) {
9801                                         if (CAM_IS_INVALID(config->
9802                                                            config_table[i])) {
9803                                                 i--; /* already invalidated */
9804                                                 break;
9805                                         }
9806                                         /* invalidate */
9807                                         CAM_INVALIDATE(config->
9808                                                        config_table[i]);
9809                                 }
9810                         }
9811
9812                         if (CHIP_REV_IS_SLOW(bp))
9813                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9814                         else
9815                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9816
9817                         config->hdr.length_6b = i;
9818                         config->hdr.offset = offset;
9819                         config->hdr.client_id = BP_CL_ID(bp);
9820                         config->hdr.reserved1 = 0;
9821
9822                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9823                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9824                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9825                                       0);
9826                 } else { /* E1H */
9827                         /* Accept one or more multicasts */
9828                         struct dev_mc_list *mclist;
9829                         u32 mc_filter[MC_HASH_SIZE];
9830                         u32 crc, bit, regidx;
9831                         int i;
9832
9833                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9834
9835                         for (i = 0, mclist = dev->mc_list;
9836                              mclist && (i < dev->mc_count);
9837                              i++, mclist = mclist->next) {
9838
9839                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9840                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9841                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9842                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9843                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9844
9845                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9846                                 bit = (crc >> 24) & 0xff;
9847                                 regidx = bit >> 5;
9848                                 bit &= 0x1f;
9849                                 mc_filter[regidx] |= (1 << bit);
9850                         }
9851
9852                         for (i = 0; i < MC_HASH_SIZE; i++)
9853                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9854                                        mc_filter[i]);
9855                 }
9856         }
9857
9858         bp->rx_mode = rx_mode;
9859         bnx2x_set_storm_rx_mode(bp);
9860 }
9861
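/* Editorial sketch (not in the original source): the E1H multicast
 * hash used above, reduced to its essentials.  crc32c_le() is the same
 * kernel routine the driver calls; the helper name is hypothetical.
 */
static void bnx2x_mc_hash_bit(u32 *mc_filter /* MC_HASH_SIZE words */,
                              const u8 *mac)
{
        u32 crc = crc32c_le(0, mac, ETH_ALEN);
        u32 bit = (crc >> 24) & 0xff;   /* top CRC byte picks 1 of 256 bits */

        mc_filter[bit >> 5] |= 1 << (bit & 0x1f);  /* word index + bit */
}
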
9862 /* called with rtnl_lock */
9863 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9864 {
9865         struct sockaddr *addr = p;
9866         struct bnx2x *bp = netdev_priv(dev);
9867
9868         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9869                 return -EINVAL;
9870
9871         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9872         if (netif_running(dev)) {
9873                 if (CHIP_IS_E1(bp))
9874                         bnx2x_set_mac_addr_e1(bp, 1);
9875                 else
9876                         bnx2x_set_mac_addr_e1h(bp, 1);
9877         }
9878
9879         return 0;
9880 }
9881
9882 /* called with rtnl_lock */
9883 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9884 {
9885         struct mii_ioctl_data *data = if_mii(ifr);
9886         struct bnx2x *bp = netdev_priv(dev);
9887         int err;
9888
9889         switch (cmd) {
9890         case SIOCGMIIPHY:
9891                 data->phy_id = bp->port.phy_addr;
9892
9893                 /* fallthrough */
9894
9895         case SIOCGMIIREG: {
9896                 u16 mii_regval;
9897
9898                 if (!netif_running(dev))
9899                         return -EAGAIN;
9900
9901                 mutex_lock(&bp->port.phy_mutex);
9902                 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9903                                       DEFAULT_PHY_DEV_ADDR,
9904                                       (data->reg_num & 0x1f), &mii_regval);
9905                 data->val_out = mii_regval;
9906                 mutex_unlock(&bp->port.phy_mutex);
9907                 return err;
9908         }
9909
9910         case SIOCSMIIREG:
9911                 if (!capable(CAP_NET_ADMIN))
9912                         return -EPERM;
9913
9914                 if (!netif_running(dev))
9915                         return -EAGAIN;
9916
9917                 mutex_lock(&bp->port.phy_mutex);
9918                 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9919                                        DEFAULT_PHY_DEV_ADDR,
9920                                        (data->reg_num & 0x1f), data->val_in);
9921                 mutex_unlock(&bp->port.phy_mutex);
9922                 return err;
9923
9924         default:
9925                 /* do nothing */
9926                 break;
9927         }
9928
9929         return -EOPNOTSUPP;
9930 }
9931
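/* Editorial sketch (not in the original source): how userspace reaches
 * the SIOCGMIIPHY/SIOCGMIIREG path above.  Guarded out since userspace
 * headers do not belong in the driver; the interface name and helper
 * are examples only.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_phy_reg(int sock, const char *ifname, int reg)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0) /* fills mii->phy_id */
                return -1;
        mii->reg_num = reg;
        if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
                return -1;
        return mii->val_out;
}
#endif
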
9932 /* called with rtnl_lock */
9933 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9934 {
9935         struct bnx2x *bp = netdev_priv(dev);
9936         int rc = 0;
9937
9938         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9939             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9940                 return -EINVAL;
9941
9942         /* This does not race with packet allocation
9943          * because the actual alloc size is
9944          * only updated as part of load
9945          */
9946         dev->mtu = new_mtu;
9947
9948         if (netif_running(dev)) {
9949                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9950                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9951         }
9952
9953         return rc;
9954 }
9955
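/* Editorial note (not in the original source): the lower bound above
 * works in frame sizes, not MTUs.  E.g. with ETH_HLEN = 14, an MTU of
 * 46 yields a 60-byte frame before FCS - the classic Ethernet minimum -
 * so 46 is the smallest MTU the check accepts if ETH_MIN_PACKET_SIZE
 * is 60.
 */
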
9956 static void bnx2x_tx_timeout(struct net_device *dev)
9957 {
9958         struct bnx2x *bp = netdev_priv(dev);
9959
9960 #ifdef BNX2X_STOP_ON_ERROR
9961         if (!bp->panic)
9962                 bnx2x_panic();
9963 #endif
9964         /* This allows the netif to be shutdown gracefully before resetting */
9965         schedule_work(&bp->reset_task);
9966 }
9967
9968 #ifdef BCM_VLAN
9969 /* called with rtnl_lock */
9970 static void bnx2x_vlan_rx_register(struct net_device *dev,
9971                                    struct vlan_group *vlgrp)
9972 {
9973         struct bnx2x *bp = netdev_priv(dev);
9974
9975         bp->vlgrp = vlgrp;
9976         if (netif_running(dev))
9977                 bnx2x_set_client_config(bp);
9978 }
9979
9980 #endif
9981
9982 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9983 static void poll_bnx2x(struct net_device *dev)
9984 {
9985         struct bnx2x *bp = netdev_priv(dev);
9986
9987         disable_irq(bp->pdev->irq);
9988         bnx2x_interrupt(bp->pdev->irq, dev);
9989         enable_irq(bp->pdev->irq);
9990 }
9991 #endif
9992
9993 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9994                                     struct net_device *dev)
9995 {
9996         struct bnx2x *bp;
9997         int rc;
9998
9999         SET_NETDEV_DEV(dev, &pdev->dev);
10000         bp = netdev_priv(dev);
10001
10002         bp->dev = dev;
10003         bp->pdev = pdev;
10004         bp->flags = 0;
10005         bp->func = PCI_FUNC(pdev->devfn);
10006
10007         rc = pci_enable_device(pdev);
10008         if (rc) {
10009                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10010                 goto err_out;
10011         }
10012
10013         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10014                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10015                        " aborting\n");
10016                 rc = -ENODEV;
10017                 goto err_out_disable;
10018         }
10019
10020         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10021                 printk(KERN_ERR PFX "Cannot find second PCI device"
10022                        " base address, aborting\n");
10023                 rc = -ENODEV;
10024                 goto err_out_disable;
10025         }
10026
10027         if (atomic_read(&pdev->enable_cnt) == 1) {
10028                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10029                 if (rc) {
10030                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10031                                " aborting\n");
10032                         goto err_out_disable;
10033                 }
10034
10035                 pci_set_master(pdev);
10036                 pci_save_state(pdev);
10037         }
10038
10039         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10040         if (bp->pm_cap == 0) {
10041                 printk(KERN_ERR PFX "Cannot find power management"
10042                        " capability, aborting\n");
10043                 rc = -EIO;
10044                 goto err_out_release;
10045         }
10046
10047         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10048         if (bp->pcie_cap == 0) {
10049                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10050                        " aborting\n");
10051                 rc = -EIO;
10052                 goto err_out_release;
10053         }
10054
10055         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10056                 bp->flags |= USING_DAC_FLAG;
10057                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10058                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10059                                " failed, aborting\n");
10060                         rc = -EIO;
10061                         goto err_out_release;
10062                 }
10063
10064         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10065                 printk(KERN_ERR PFX "System does not support DMA,"
10066                        " aborting\n");
10067                 rc = -EIO;
10068                 goto err_out_release;
10069         }
10070
10071         dev->mem_start = pci_resource_start(pdev, 0);
10072         dev->base_addr = dev->mem_start;
10073         dev->mem_end = pci_resource_end(pdev, 0);
10074
10075         dev->irq = pdev->irq;
10076
10077         bp->regview = ioremap_nocache(dev->base_addr,
10078                                       pci_resource_len(pdev, 0));
10079         if (!bp->regview) {
10080                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10081                 rc = -ENOMEM;
10082                 goto err_out_release;
10083         }
10084
10085         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10086                                         min_t(u64, BNX2X_DB_SIZE,
10087                                               pci_resource_len(pdev, 2)));
10088         if (!bp->doorbells) {
10089                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10090                 rc = -ENOMEM;
10091                 goto err_out_unmap;
10092         }
10093
10094         bnx2x_set_power_state(bp, PCI_D0);
10095
10096         /* clean indirect addresses */
10097         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10098                                PCICFG_VENDOR_ID_OFFSET);
10099         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10100         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10101         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10102         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10103
10104         dev->hard_start_xmit = bnx2x_start_xmit;
10105         dev->watchdog_timeo = TX_TIMEOUT;
10106
10107         dev->ethtool_ops = &bnx2x_ethtool_ops;
10108         dev->open = bnx2x_open;
10109         dev->stop = bnx2x_close;
10110         dev->set_multicast_list = bnx2x_set_rx_mode;
10111         dev->set_mac_address = bnx2x_change_mac_addr;
10112         dev->do_ioctl = bnx2x_ioctl;
10113         dev->change_mtu = bnx2x_change_mtu;
10114         dev->tx_timeout = bnx2x_tx_timeout;
10115 #ifdef BCM_VLAN
10116         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10117 #endif
10118 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10119         dev->poll_controller = poll_bnx2x;
10120 #endif
10121         dev->features |= NETIF_F_SG;
10122         dev->features |= NETIF_F_HW_CSUM;
10123         if (bp->flags & USING_DAC_FLAG)
10124                 dev->features |= NETIF_F_HIGHDMA;
10125 #ifdef BCM_VLAN
10126         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10127 #endif
10128         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10129         dev->features |= NETIF_F_TSO6;
10130
10131         return 0;
10132
10133 err_out_unmap:
10134         if (bp->regview) {
10135                 iounmap(bp->regview);
10136                 bp->regview = NULL;
10137         }
10138         if (bp->doorbells) {
10139                 iounmap(bp->doorbells);
10140                 bp->doorbells = NULL;
10141         }
10142
10143 err_out_release:
10144         if (atomic_read(&pdev->enable_cnt) == 1)
10145                 pci_release_regions(pdev);
10146
10147 err_out_disable:
10148         pci_disable_device(pdev);
10149         pci_set_drvdata(pdev, NULL);
10150
10151 err_out:
10152         return rc;
10153 }
10154
10155 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10156 {
10157         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10158
10159         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10160         return val;
10161 }
10162
10163 /* return value of 1=2.5GHz 2=5GHz */
10164 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10165 {
10166         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10167
10168         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10169         return val;
10170 }
10171
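/* Editorial sketch (not in the original source): intended use of the
 * two decoders above, mirroring the banner printed in bnx2x_init_one().
 * The helper name is hypothetical.
 */
static void bnx2x_print_pcie_info(struct bnx2x *bp)
{
        printk(KERN_INFO PFX "PCI-E x%d %s\n",
               bnx2x_get_pcie_width(bp),
               (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz");
}
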
10172 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10173                                     const struct pci_device_id *ent)
10174 {
10175         static int version_printed;
10176         struct net_device *dev = NULL;
10177         struct bnx2x *bp;
10178         int rc;
10179         DECLARE_MAC_BUF(mac);
10180
10181         if (version_printed++ == 0)
10182                 printk(KERN_INFO "%s", version);
10183
10184         /* dev zeroed in init_etherdev */
10185         dev = alloc_etherdev(sizeof(*bp));
10186         if (!dev) {
10187                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10188                 return -ENOMEM;
10189         }
10190
10191         netif_carrier_off(dev);
10192
10193         bp = netdev_priv(dev);
10194         bp->msglevel = debug;
10195
10196         rc = bnx2x_init_dev(pdev, dev);
10197         if (rc < 0) {
10198                 free_netdev(dev);
10199                 return rc;
10200         }
10201
10202         rc = register_netdev(dev);
10203         if (rc) {
10204                 dev_err(&pdev->dev, "Cannot register net device\n");
10205                 goto init_one_exit;
10206         }
10207
10208         pci_set_drvdata(pdev, dev);
10209
10210         rc = bnx2x_init_bp(bp);
10211         if (rc) {
10212                 unregister_netdev(dev);
10213                 goto init_one_exit;
10214         }
10215
10216         bp->common.name = board_info[ent->driver_data].name;
10217         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10218                " IRQ %d, ", dev->name, bp->common.name,
10219                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10220                bnx2x_get_pcie_width(bp),
10221                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10222                dev->base_addr, bp->pdev->irq);
10223         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10224         return 0;
10225
10226 init_one_exit:
10227         if (bp->regview)
10228                 iounmap(bp->regview);
10229
10230         if (bp->doorbells)
10231                 iounmap(bp->doorbells);
10232
10233         free_netdev(dev);
10234
10235         if (atomic_read(&pdev->enable_cnt) == 1)
10236                 pci_release_regions(pdev);
10237
10238         pci_disable_device(pdev);
10239         pci_set_drvdata(pdev, NULL);
10240
10241         return rc;
10242 }
10243
10244 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10245 {
10246         struct net_device *dev = pci_get_drvdata(pdev);
10247         struct bnx2x *bp;
10248
10249         if (!dev) {
10250                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10251                 return;
10252         }
10253         bp = netdev_priv(dev);
10254
10255         unregister_netdev(dev);
10256
10257         if (bp->regview)
10258                 iounmap(bp->regview);
10259
10260         if (bp->doorbells)
10261                 iounmap(bp->doorbells);
10262
10263         free_netdev(dev);
10264
10265         if (atomic_read(&pdev->enable_cnt) == 1)
10266                 pci_release_regions(pdev);
10267
10268         pci_disable_device(pdev);
10269         pci_set_drvdata(pdev, NULL);
10270 }
10271
10272 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10273 {
10274         struct net_device *dev = pci_get_drvdata(pdev);
10275         struct bnx2x *bp;
10276
10277         if (!dev) {
10278                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10279                 return -ENODEV;
10280         }
10281         bp = netdev_priv(dev);
10282
10283         rtnl_lock();
10284
10285         pci_save_state(pdev);
10286
10287         if (!netif_running(dev)) {
10288                 rtnl_unlock();
10289                 return 0;
10290         }
10291
10292         netif_device_detach(dev);
10293
10294         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10295
10296         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10297
10298         rtnl_unlock();
10299
10300         return 0;
10301 }
10302
10303 static int bnx2x_resume(struct pci_dev *pdev)
10304 {
10305         struct net_device *dev = pci_get_drvdata(pdev);
10306         struct bnx2x *bp;
10307         int rc;
10308
10309         if (!dev) {
10310                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10311                 return -ENODEV;
10312         }
10313         bp = netdev_priv(dev);
10314
10315         rtnl_lock();
10316
10317         pci_restore_state(pdev);
10318
10319         if (!netif_running(dev)) {
10320                 rtnl_unlock();
10321                 return 0;
10322         }
10323
10324         bnx2x_set_power_state(bp, PCI_D0);
10325         netif_device_attach(dev);
10326
10327         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10328
10329         rtnl_unlock();
10330
10331         return rc;
10332 }
10333
10334 /**
10335  * bnx2x_io_error_detected - called when PCI error is detected
10336  * @pdev: Pointer to PCI device
10337  * @state: The current pci connection state
10338  *
10339  * This function is called after a PCI bus error affecting
10340  * this device has been detected.
10341  */
10342 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10343                                                 pci_channel_state_t state)
10344 {
10345         struct net_device *dev = pci_get_drvdata(pdev);
10346         struct bnx2x *bp = netdev_priv(dev);
10347
10348         rtnl_lock();
10349
10350         netif_device_detach(dev);
10351
10352         if (netif_running(dev))
10353                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10354
10355         pci_disable_device(pdev);
10356
10357         rtnl_unlock();
10358
10359         /* Request a slot reset */
10360         return PCI_ERS_RESULT_NEED_RESET;
10361 }
10362
10363 /**
10364  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10365  * @pdev: Pointer to PCI device
10366  *
10367  * Restart the card from scratch, as if from a cold-boot.
10368  */
10369 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10370 {
10371         struct net_device *dev = pci_get_drvdata(pdev);
10372         struct bnx2x *bp = netdev_priv(dev);
10373
10374         rtnl_lock();
10375
10376         if (pci_enable_device(pdev)) {
10377                 dev_err(&pdev->dev,
10378                         "Cannot re-enable PCI device after reset\n");
10379                 rtnl_unlock();
10380                 return PCI_ERS_RESULT_DISCONNECT;
10381         }
10382
10383         pci_set_master(pdev);
10384         pci_restore_state(pdev);
10385
10386         if (netif_running(dev))
10387                 bnx2x_set_power_state(bp, PCI_D0);
10388
10389         rtnl_unlock();
10390
10391         return PCI_ERS_RESULT_RECOVERED;
10392 }
10393
10394 /**
10395  * bnx2x_io_resume - called when traffic can start flowing again
10396  * @pdev: Pointer to PCI device
10397  *
10398  * This callback is called when the error recovery driver tells us that
10399  * it's OK to resume normal operation.
10400  */
10401 static void bnx2x_io_resume(struct pci_dev *pdev)
10402 {
10403         struct net_device *dev = pci_get_drvdata(pdev);
10404         struct bnx2x *bp = netdev_priv(dev);
10405
10406         rtnl_lock();
10407
10408         if (netif_running(dev))
10409                 bnx2x_nic_load(bp, LOAD_OPEN);
10410
10411         netif_device_attach(dev);
10412
10413         rtnl_unlock();
10414 }
10415
10416 static struct pci_error_handlers bnx2x_err_handler = {
10417         .error_detected = bnx2x_io_error_detected,
10418         .slot_reset = bnx2x_io_slot_reset,
10419         .resume = bnx2x_io_resume,
10420 };
10421
10422 static struct pci_driver bnx2x_pci_driver = {
10423         .name        = DRV_MODULE_NAME,
10424         .id_table    = bnx2x_pci_tbl,
10425         .probe       = bnx2x_init_one,
10426         .remove      = __devexit_p(bnx2x_remove_one),
10427         .suspend     = bnx2x_suspend,
10428         .resume      = bnx2x_resume,
10429         .err_handler = &bnx2x_err_handler,
10430 };
10431
10432 static int __init bnx2x_init(void)
10433 {
10434         return pci_register_driver(&bnx2x_pci_driver);
10435 }
10436
10437 static void __exit bnx2x_cleanup(void)
10438 {
10439         pci_unregister_driver(&bnx2x_pci_driver);
10440 }
10441
10442 module_init(bnx2x_init);
10443 module_exit(bnx2x_cleanup);
10444