/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.25"
#define DRV_MODULE_RELDATE      "2009/01/22"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

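/* Write len32 dwords from host memory at dma_addr to GRC address dst_addr
 * using the DMAE block.  Falls back to indirect register writes while DMAE
 * is not yet ready, and busy-waits (bounded by cnt) on the wb_comp slowpath
 * word for the DMAE_COMP_VAL completion marker.
 */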
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

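/* Read len32 dwords from GRC address src_addr into the wb_data slowpath
 * buffer using the DMAE block (or indirect register reads while DMAE is
 * not yet ready).
 */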
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

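/* Scan the assert lists of the four storm processors (XSTORM, TSTORM,
 * CSTORM, USTORM).  Each valid entry is four dwords; scanning stops at
 * the first invalid opcode.  Returns the number of asserts found.
 */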
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

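/* Dump the firmware trace from the MCP scratchpad.  The 'mark' word at
 * offset 0xf104 points into the circular trace buffer: print from mark to
 * the buffer end, then wrap around from 0xF108 back up to mark.
 */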
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_CONT "\n");
        printk(KERN_ERR PFX "end of fw dump\n");
}

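/* Print a full crash dump: per-queue producer/consumer indices, the TX/RX
 * rings around the current consumers, the default status block indices,
 * the firmware trace and any storm asserts.
 */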
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

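/* Enable host coalescing interrupts for this port.  In INT#A mode the HC
 * config register is written twice: first with the MSI/MSI-X enable bit
 * still set, then with it cleared.  On E1H the leading/trailing edge
 * registers are also initialized.
 */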
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

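/* Acknowledge a status block update to the IGU: report the new index for
 * the given storm and optionally re-enable the interrupt, all with a
 * single 32-bit write to the HC command register.
 */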
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return ((fp->tx_pkt_prod != tx_cons_sb) ||
                (fp->tx_pkt_prod != fp->tx_pkt_cons));
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

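/* Number of TX BDs available for new packets; the NUM_TX_RINGS "next page"
 * BDs are counted as used so they are never handed out.
 */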
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

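/* TX completion path: walk from the driver's consumer to the hw consumer
 * reported in the status block, freeing each completed packet, and wake
 * the netdev queue if it was stopped and enough BDs were reclaimed.
 */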
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


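/* Handle a slowpath (ramrod) completion CQE: advance the fastpath or
 * global state machine according to which command completed in which
 * state, and release the slowpath queue credit.
 */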
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* Note that we are not allocating a new skb here, just moving one from
 * cons to prod; since no new mapping is created, there is no need to
 * check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

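/* After a TPA completion: clear the mask bits of the SGEs consumed by this
 * CQE and advance rx_sge_prod past every 64-bit mask element that became
 * fully consumed.
 */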
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

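/* Begin a TPA aggregation on 'queue': park the just-received skb in the
 * per-queue tpa_pool bin (still mapped) and put the bin's spare skb on
 * the producer BD in its place.
 */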
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

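/* End of a TPA aggregation: unmap the bin's skb, fix its IP checksum,
 * attach the SGE page frags and hand it to the stack; on allocation
 * failure the packet is dropped but the bin keeps a buffer.
 */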
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64.  The following barrier is also mandatory since the FW
         * assumes all BDs must have buffers.
         */
        wmb();

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        mmiowb(); /* keep prod updates ordered */

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

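/* RX fast-path poll: consume up to 'budget' completions from the RCQ,
 * dispatching slowpath CQEs to bnx2x_sp_event() and TPA start/stop CQEs
 * to the aggregation handlers.
 */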
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* CQ "next element" is of the size of the regular element,
           that's why it's ok here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                u16 queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
1491                                                 BNX2X_ERR("STOP on non-TCP "
1492                                                           "data\n");
1493
1494                                         /* This is the size of the linear data
1495                                            on this skb */
1496                                         len = le16_to_cpu(cqe->fast_path_cqe.
1497                                                                 len_on_bd);
1498                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1499                                                     len, cqe, comp_ring_cons);
1500 #ifdef BNX2X_STOP_ON_ERROR
1501                                         if (bp->panic)
1502                                                 return -EINVAL;
1503 #endif
1504
1505                                         bnx2x_update_sge_prod(fp,
1506                                                         &cqe->fast_path_cqe);
1507                                         goto next_cqe;
1508                                 }
1509                         }
1510
1511                         pci_dma_sync_single_for_device(bp->pdev,
1512                                         pci_unmap_addr(rx_buf, mapping),
1513                                                        pad + RX_COPY_THRESH,
1514                                                        PCI_DMA_FROMDEVICE);
1515                         prefetch(skb);
1516                         prefetch(((char *)(skb)) + 128);
1517
1518                         /* is this an error packet? */
1519                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1520                                 DP(NETIF_MSG_RX_ERR,
1521                                    "ERROR  flags %x  rx packet %u\n",
1522                                    cqe_fp_flags, sw_comp_cons);
1523                                 bp->eth_stats.rx_err_discard_pkt++;
1524                                 goto reuse_rx;
1525                         }
1526
1527                         /* Since we don't have a jumbo ring,
1528                          * copy small packets if mtu > 1500
1529                          */
1530                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1531                             (len <= RX_COPY_THRESH)) {
1532                                 struct sk_buff *new_skb;
1533
1534                                 new_skb = netdev_alloc_skb(bp->dev,
1535                                                            len + pad);
1536                                 if (new_skb == NULL) {
1537                                         DP(NETIF_MSG_RX_ERR,
1538                                            "ERROR  packet dropped "
1539                                            "because of alloc failure\n");
1540                                         bp->eth_stats.rx_skb_alloc_failed++;
1541                                         goto reuse_rx;
1542                                 }
1543
1544                                 /* aligned copy */
1545                                 skb_copy_from_linear_data_offset(skb, pad,
1546                                                     new_skb->data + pad, len);
1547                                 skb_reserve(new_skb, pad);
1548                                 skb_put(new_skb, len);
1549
1550                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1551
1552                                 skb = new_skb;
1553
1554                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1555                                 pci_unmap_single(bp->pdev,
1556                                         pci_unmap_addr(rx_buf, mapping),
1557                                                  bp->rx_buf_size,
1558                                                  PCI_DMA_FROMDEVICE);
1559                                 skb_reserve(skb, pad);
1560                                 skb_put(skb, len);
1561
1562                         } else {
1563                                 DP(NETIF_MSG_RX_ERR,
1564                                    "ERROR  packet dropped because "
1565                                    "of alloc failure\n");
1566                                 bp->eth_stats.rx_skb_alloc_failed++;
1567 reuse_rx:
1568                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1569                                 goto next_rx;
1570                         }
1571
1572                         skb->protocol = eth_type_trans(skb, bp->dev);
1573
1574                         skb->ip_summed = CHECKSUM_NONE;
1575                         if (bp->rx_csum) {
1576                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1577                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1578                                 else
1579                                         bp->eth_stats.hw_csum_err++;
1580                         }
1581                 }
1582
1583 #ifdef BCM_VLAN
1584                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1585                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1586                      PARSING_FLAGS_VLAN))
1587                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1588                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1589                 else
1590 #endif
1591                         netif_receive_skb(skb);
1592
1593
1594 next_rx:
1595                 rx_buf->skb = NULL;
1596
1597                 bd_cons = NEXT_RX_IDX(bd_cons);
1598                 bd_prod = NEXT_RX_IDX(bd_prod);
1599                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1600                 rx_pkt++;
1601 next_cqe:
1602                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1603                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1604
1605                 if (rx_pkt == budget)
1606                         break;
1607         } /* while */
1608
1609         fp->rx_bd_cons = bd_cons;
1610         fp->rx_bd_prod = bd_prod_fw;
1611         fp->rx_comp_cons = sw_comp_cons;
1612         fp->rx_comp_prod = sw_comp_prod;
1613
1614         /* Update producers */
1615         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1616                              fp->rx_sge_prod);
1617
1618         fp->rx_pkt += rx_pkt;
1619         fp->rx_calls++;
1620
1621         return rx_pkt;
1622 }
1623
1624 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1625 {
1626         struct bnx2x_fastpath *fp = fp_cookie;
1627         struct bnx2x *bp = fp->bp;
1628         int index = FP_IDX(fp);
1629
1630         /* Return here if interrupt is disabled */
1631         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1632                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1633                 return IRQ_HANDLED;
1634         }
1635
1636         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1637            index, FP_SB_ID(fp));
1638         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1639
1640 #ifdef BNX2X_STOP_ON_ERROR
1641         if (unlikely(bp->panic))
1642                 return IRQ_HANDLED;
1643 #endif
1644
1645         prefetch(fp->rx_cons_sb);
1646         prefetch(fp->tx_cons_sb);
1647         prefetch(&fp->status_blk->c_status_block.status_block_index);
1648         prefetch(&fp->status_blk->u_status_block.status_block_index);
1649
1650         netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1651
1652         return IRQ_HANDLED;
1653 }
1654
1655 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1656 {
1657         struct net_device *dev = dev_instance;
1658         struct bnx2x *bp = netdev_priv(dev);
1659         u16 status = bnx2x_ack_int(bp);
1660         u16 mask;
1661
1662         /* Return here if interrupt is shared and it's not for us */
1663         if (unlikely(status == 0)) {
1664                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1665                 return IRQ_NONE;
1666         }
1667         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1668
1669         /* Return here if interrupt is disabled */
1670         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1671                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1672                 return IRQ_HANDLED;
1673         }
1674
1675 #ifdef BNX2X_STOP_ON_ERROR
1676         if (unlikely(bp->panic))
1677                 return IRQ_HANDLED;
1678 #endif
1679
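        /* status bit 0 signals the slow path; bit (sb_id + 1), i.e. the
           0x2 << sb_id mask below, signals fastpath status block sb_id */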
1680         mask = 0x2 << bp->fp[0].sb_id;
1681         if (status & mask) {
1682                 struct bnx2x_fastpath *fp = &bp->fp[0];
1683
1684                 prefetch(fp->rx_cons_sb);
1685                 prefetch(fp->tx_cons_sb);
1686                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1687                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1688
1689                 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1690
1691                 status &= ~mask;
1692         }
1693
1694
1695         if (unlikely(status & 0x1)) {
1696                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1697
1698                 status &= ~0x1;
1699                 if (!status)
1700                         return IRQ_HANDLED;
1701         }
1702
1703         if (status)
1704                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1705                    status);
1706
1707         return IRQ_HANDLED;
1708 }
1709
1710 /* end of fast path */
1711
1712 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1713
1714 /* Link */
1715
1716 /*
1717  * General service functions
1718  */
1719
1720 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1721 {
1722         u32 lock_status;
1723         u32 resource_bit = (1 << resource);
1724         int func = BP_FUNC(bp);
1725         u32 hw_lock_control_reg;
1726         int cnt;
1727
1728         /* Validating that the resource is within range */
1729         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1730                 DP(NETIF_MSG_HW,
1731                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1732                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1733                 return -EINVAL;
1734         }
1735
1736         if (func <= 5) {
1737                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1738         } else {
1739                 hw_lock_control_reg =
1740                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1741         }
1742
1743         /* Validating that the resource is not already taken */
1744         lock_status = REG_RD(bp, hw_lock_control_reg);
1745         if (lock_status & resource_bit) {
1746                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1747                    lock_status, resource_bit);
1748                 return -EEXIST;
1749         }
1750
1751         /* Try for 5 seconds, polling every 5ms */
1752         for (cnt = 0; cnt < 1000; cnt++) {
1753                 /* Try to acquire the lock */
1754                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1755                 lock_status = REG_RD(bp, hw_lock_control_reg);
1756                 if (lock_status & resource_bit)
1757                         return 0;
1758
1759                 msleep(5);
1760         }
1761         DP(NETIF_MSG_HW, "Timeout\n");
1762         return -EAGAIN;
1763 }
1764
1765 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1766 {
1767         u32 lock_status;
1768         u32 resource_bit = (1 << resource);
1769         int func = BP_FUNC(bp);
1770         u32 hw_lock_control_reg;
1771
1772         /* Validating that the resource is within range */
1773         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1774                 DP(NETIF_MSG_HW,
1775                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1776                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777                 return -EINVAL;
1778         }
1779
1780         if (func <= 5) {
1781                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1782         } else {
1783                 hw_lock_control_reg =
1784                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1785         }
1786
1787         /* Validating that the resource is currently taken */
1788         lock_status = REG_RD(bp, hw_lock_control_reg);
1789         if (!(lock_status & resource_bit)) {
1790                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1791                    lock_status, resource_bit);
1792                 return -EFAULT;
1793         }
1794
1795         REG_WR(bp, hw_lock_control_reg, resource_bit);
1796         return 0;
1797 }
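/* Note on the register pair used by the two functions above: writing the
   resource bit to hw_lock_control_reg + 4 tries to set it (acquire),
   reading the base register back shows which bits are currently held,
   and writing the bit to the base register clears it (release). */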
1798
1799 /* HW Lock for shared dual port PHYs */
1800 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1801 {
1802         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1803
1804         mutex_lock(&bp->port.phy_mutex);
1805
1806         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1807             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1808                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1809 }
1810
1811 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1812 {
1813         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1814
1815         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1816             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1817                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1818
1819         mutex_unlock(&bp->port.phy_mutex);
1820 }
1821
1822 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1823 {
1824         /* The GPIO should be swapped if swap register is set and active */
1825         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1826                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1827         int gpio_shift = gpio_num +
1828                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1829         u32 gpio_mask = (1 << gpio_shift);
1830         u32 gpio_reg;
1831
1832         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1833                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1834                 return -EINVAL;
1835         }
1836
1837         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838         /* read GPIO and mask out everything except the float bits */
1839         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1840
1841         switch (mode) {
1842         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1843                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1844                    gpio_num, gpio_shift);
1845                 /* clear FLOAT and set CLR */
1846                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1847                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1848                 break;
1849
1850         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1851                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1852                    gpio_num, gpio_shift);
1853                 /* clear FLOAT and set SET */
1854                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1856                 break;
1857
1858         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1859                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1860                    gpio_num, gpio_shift);
1861                 /* set FLOAT */
1862                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863                 break;
1864
1865         default:
1866                 break;
1867         }
1868
1869         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1870         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1871
1872         return 0;
1873 }
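/* Worked example with hypothetical values: gpio_num = 2 on the swapped
   port gives gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT and
   gpio_mask = 1 << gpio_shift; OUTPUT_LOW then clears that mask at the
   FLOAT position and sets it at the CLR position, as in the switch above. */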
1874
1875 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1876 {
1877         u32 spio_mask = (1 << spio_num);
1878         u32 spio_reg;
1879
1880         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1881             (spio_num > MISC_REGISTERS_SPIO_7)) {
1882                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1883                 return -EINVAL;
1884         }
1885
1886         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887         /* read SPIO and mask out everything except the float bits */
1888         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1889
1890         switch (mode) {
1891         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1892                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1893                 /* clear FLOAT and set CLR */
1894                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1895                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1896                 break;
1897
1898         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1899                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1900                 /* clear FLOAT and set SET */
1901                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1903                 break;
1904
1905         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1906                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1907                 /* set FLOAT */
1908                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909                 break;
1910
1911         default:
1912                 break;
1913         }
1914
1915         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1916         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1917
1918         return 0;
1919 }
1920
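/* Map the negotiated IEEE pause bits onto the ethtool advertising flags:
   BOTH -> Pause | Asym_Pause, ASYMMETRIC -> Asym_Pause only,
   NONE or anything else -> neither (see the switch below) */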
1921 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1922 {
1923         switch (bp->link_vars.ieee_fc &
1924                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1925         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1926                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1927                                           ADVERTISED_Pause);
1928                 break;
1929         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1930                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1931                                          ADVERTISED_Pause);
1932                 break;
1933         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1934                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1935                 break;
1936         default:
1937                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1938                                           ADVERTISED_Pause);
1939                 break;
1940         }
1941 }
1942
1943 static void bnx2x_link_report(struct bnx2x *bp)
1944 {
1945         if (bp->link_vars.link_up) {
1946                 if (bp->state == BNX2X_STATE_OPEN)
1947                         netif_carrier_on(bp->dev);
1948                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1949
1950                 printk("%d Mbps ", bp->link_vars.line_speed);
1951
1952                 if (bp->link_vars.duplex == DUPLEX_FULL)
1953                         printk("full duplex");
1954                 else
1955                         printk("half duplex");
1956
1957                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1958                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1959                                 printk(", receive ");
1960                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1961                                         printk("& transmit ");
1962                         } else {
1963                                 printk(", transmit ");
1964                         }
1965                         printk("flow control ON");
1966                 }
1967                 printk("\n");
1968
1969         } else { /* link_down */
1970                 netif_carrier_off(bp->dev);
1971                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1972         }
1973 }
1974
1975 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1976 {
1977         if (!BP_NOMCP(bp)) {
1978                 u8 rc;
1979
1980                 /* Initialize link parameters structure variables */
1981                 /* It is recommended to turn off RX FC for jumbo frames
1982                    for better performance */
1983                 if (IS_E1HMF(bp))
1984                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1985                 else if (bp->dev->mtu > 5000)
1986                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1987                 else
1988                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1989
1990                 bnx2x_acquire_phy_lock(bp);
1991                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1992                 bnx2x_release_phy_lock(bp);
1993
1994                 bnx2x_calc_fc_adv(bp);
1995
1996                 if (bp->link_vars.link_up)
1997                         bnx2x_link_report(bp);
1998
1999
2000                 return rc;
2001         }
2002         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2003         return -EINVAL;
2004 }
2005
2006 static void bnx2x_link_set(struct bnx2x *bp)
2007 {
2008         if (!BP_NOMCP(bp)) {
2009                 bnx2x_acquire_phy_lock(bp);
2010                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2011                 bnx2x_release_phy_lock(bp);
2012
2013                 bnx2x_calc_fc_adv(bp);
2014         } else
2015                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2016 }
2017
2018 static void bnx2x__link_reset(struct bnx2x *bp)
2019 {
2020         if (!BP_NOMCP(bp)) {
2021                 bnx2x_acquire_phy_lock(bp);
2022                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
2023                 bnx2x_release_phy_lock(bp);
2024         } else
2025                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2026 }
2027
2028 static u8 bnx2x_link_test(struct bnx2x *bp)
2029 {
2030         u8 rc;
2031
2032         bnx2x_acquire_phy_lock(bp);
2033         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2034         bnx2x_release_phy_lock(bp);
2035
2036         return rc;
2037 }
2038
2039 /* Calculates the sum of vn_min_rates.
2040    It's needed for further normalizing of the min_rates.
2041
2042    Returns:
2043      sum of vn_min_rates
2044        or
2045      0 - if all the min_rates are 0.
2046      In the latter case the fairness algorithm should be deactivated.
2047      If not all min_rates are zero then those that are zero will
2048      be set to 1.
2049  */
2050 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2051 {
2052         int i, port = BP_PORT(bp);
2053         u32 wsum = 0;
2054         int all_zero = 1;
2055
2056         for (i = 0; i < E1HVN_MAX; i++) {
2057                 u32 vn_cfg =
2058                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2059                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2060                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2061                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2062                         /* If min rate is zero - set it to 1 */
2063                         if (!vn_min_rate)
2064                                 vn_min_rate = DEF_MIN_RATE;
2065                         else
2066                                 all_zero = 0;
2067
2068                         wsum += vn_min_rate;
2069                 }
2070         }
2071
2072         /* only if all min rates are zero - disable FAIRNESS */
2073         if (all_zero)
2074                 return 0;
2075
2076         return wsum;
2077 }
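/* Worked example with hypothetical min rates: four vnics configured at
   {0, 25, 0, 40} are scaled by 100 to {0, 2500, 0, 4000}; with none
   hidden, the zero entries become DEF_MIN_RATE, so
   wsum = DEF_MIN_RATE + 2500 + DEF_MIN_RATE + 4000 (and all_zero is 0). */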
2078
2079 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2080                                    int en_fness,
2081                                    u16 port_rate,
2082                                    struct cmng_struct_per_port *m_cmng_port)
2083 {
2084         u32 r_param = port_rate / 8;
2085         int port = BP_PORT(bp);
2086         int i;
2087
2088         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2089
2090         /* Enable minmax only if we are in e1hmf mode */
2091         if (IS_E1HMF(bp)) {
2092                 u32 fair_periodic_timeout_usec;
2093                 u32 t_fair;
2094
2095                 /* Enable rate shaping and fairness */
2096                 m_cmng_port->flags.cmng_vn_enable = 1;
2097                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2098                 m_cmng_port->flags.rate_shaping_enable = 1;
2099
2100                 if (!en_fness)
2101                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2102                            " fairness will be disabled\n");
2103
2104                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2105                 m_cmng_port->rs_vars.rs_periodic_timeout =
2106                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2107
2108                 /* this is the threshold below which no timer arming occurs;
2109                    the 1.25 coefficient makes the threshold a little bigger
2110                    than the real time, to compensate for timer inaccuracy */
2111                 m_cmng_port->rs_vars.rs_threshold =
2112                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2113
2114                 /* resolution of fairness timer */
2115                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2116                 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2117                 t_fair = T_FAIR_COEF / port_rate;
2118
2119                 /* this is the threshold below which we won't arm
2120                    the timer anymore */
2121                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2122
2123                 /* we multiply by 1e3/8 to get bytes/msec.
2124                    We don't want the credits to exceed a credit of
2125                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2126                 m_cmng_port->fair_vars.upper_bound =
2127                                                 r_param * t_fair * FAIR_MEM;
2128                 /* since each tick is 4 usec */
2129                 m_cmng_port->fair_vars.fairness_timeout =
2130                                                 fair_periodic_timeout_usec / 4;
2131
2132         } else {
2133                 /* Disable rate shaping and fairness */
2134                 m_cmng_port->flags.cmng_vn_enable = 0;
2135                 m_cmng_port->flags.fairness_enable = 0;
2136                 m_cmng_port->flags.rate_shaping_enable = 0;
2137
2138                 DP(NETIF_MSG_IFUP,
2139                    "Single function mode - minmax will be disabled\n");
2140         }
2141
2142         /* Store it to internal memory */
2143         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2144                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2145                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2146                        ((u32 *)(m_cmng_port))[i]);
2147 }
2148
2149 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2150                                    u32 wsum, u16 port_rate,
2151                                  struct cmng_struct_per_port *m_cmng_port)
2152 {
2153         struct rate_shaping_vars_per_vn m_rs_vn;
2154         struct fairness_vars_per_vn m_fair_vn;
2155         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2156         u16 vn_min_rate, vn_max_rate;
2157         int i;
2158
2159         /* If function is hidden - set min and max to zeroes */
2160         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2161                 vn_min_rate = 0;
2162                 vn_max_rate = 0;
2163
2164         } else {
2165                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2166                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2167                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2168                    if current min rate is zero - set it to 1.
2169                    This is a requirement of the algorithm. */
2170                 if ((vn_min_rate == 0) && wsum)
2171                         vn_min_rate = DEF_MIN_RATE;
2172                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2173                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2174         }
2175
2176         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2177            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2178
2179         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2180         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2181
2182         /* global vn counter - maximal Mbps for this vn */
2183         m_rs_vn.vn_counter.rate = vn_max_rate;
2184
2185         /* quota - number of bytes transmitted in this period */
2186         m_rs_vn.vn_counter.quota =
2187                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
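        /* e.g. a vn_max_rate of 10000 Mbps (1 bit/usec per Mbps) over the
           100 usec period gives quota = 10000 * 100 / 8 = 125000 bytes */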
2188
2189 #ifdef BNX2X_PER_PROT_QOS
2190         /* per protocol counter */
2191         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2192                 /* maximal Mbps for this protocol */
2193                 m_rs_vn.protocol_counters[protocol].rate =
2194                                                 protocol_max_rate[protocol];
2195                 /* the quota in each timer period -
2196                    number of bytes transmitted in this period */
2197                 m_rs_vn.protocol_counters[protocol].quota =
2198                         (u32)(rs_periodic_timeout_usec *
2199                           ((double)m_rs_vn.
2200                                    protocol_counters[protocol].rate/8));
2201         }
2202 #endif
2203
2204         if (wsum) {
2205                 /* credit for each period of the fairness algorithm:
2206                    number of bytes in T_FAIR (the vns share the port rate).
2207                    wsum should not be larger than 10000, thus
2208                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2209                 m_fair_vn.vn_credit_delta =
2210                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2211                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2213                    m_fair_vn.vn_credit_delta);
2214         }
2215
2216 #ifdef BNX2X_PER_PROT_QOS
2217         do {
2218                 u32 protocolWeightSum = 0;
2219
2220                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2221                         protocolWeightSum +=
2222                                         drvInit.protocol_min_rate[protocol];
2223                 /* per protocol counter -
2224                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225                 if (protocolWeightSum > 0) {
2226                         for (protocol = 0;
2227                              protocol < NUM_OF_PROTOCOLS; protocol++)
2228                                 /* credit for each period of the
2229                                    fairness algorithm - number of bytes in
2230                                    T_FAIR (the protocols share the vn rate) */
2231                                 m_fair_vn.protocol_credit_delta[protocol] =
2232                                         (u32)((vn_min_rate / 8) * t_fair *
2233                                         protocol_min_rate / protocolWeightSum);
2234                 }
2235         } while (0);
2236 #endif
2237
2238         /* Store it to internal memory */
2239         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2240                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2241                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2242                        ((u32 *)(&m_rs_vn))[i]);
2243
2244         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2245                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2247                        ((u32 *)(&m_fair_vn))[i]);
2248 }
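/* Worked example of the credit formula above (derived from the comments:
   T_FAIR_COEF is 10^7, since t_fair is 1000 usec at 10000 Mbps):
   with wsum = 10000 and vn_min_rate = 2500,
   vn_credit_delta = 2500 * 10^7 / (8 * 10000) = 312500 bytes,
   unless 2 * fair_threshold is larger, in which case that wins. */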
2249
2250 /* This function is called upon link interrupt */
2251 static void bnx2x_link_attn(struct bnx2x *bp)
2252 {
2253         int vn;
2254
2255         /* Make sure that we are synced with the current statistics */
2256         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2257
2258         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2259
2260         if (bp->link_vars.link_up) {
2261
2262                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2263                         struct host_port_stats *pstats;
2264
2265                         pstats = bnx2x_sp(bp, port_stats);
2266                         /* reset old bmac stats */
2267                         memset(&(pstats->mac_stx[0]), 0,
2268                                sizeof(struct mac_stx));
2269                 }
2270                 if ((bp->state == BNX2X_STATE_OPEN) ||
2271                     (bp->state == BNX2X_STATE_DISABLED))
2272                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2273         }
2274
2275         /* indicate link status */
2276         bnx2x_link_report(bp);
2277
2278         if (IS_E1HMF(bp)) {
2279                 int func;
2280
2281                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2282                         if (vn == BP_E1HVN(bp))
2283                                 continue;
2284
2285                         func = ((vn << 1) | BP_PORT(bp));
2286
2287                         /* Set the attention towards other drivers
2288                            on the same port */
2289                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2290                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2291                 }
2292         }
2293
2294         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2295                 struct cmng_struct_per_port m_cmng_port;
2296                 u32 wsum;
2297                 int port = BP_PORT(bp);
2298
2299                 /* Init RATE SHAPING and FAIRNESS contexts */
2300                 wsum = bnx2x_calc_vn_wsum(bp);
2301                 bnx2x_init_port_minmax(bp, (int)wsum,
2302                                         bp->link_vars.line_speed,
2303                                         &m_cmng_port);
2304                 if (IS_E1HMF(bp))
2305                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2306                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2307                                         wsum, bp->link_vars.line_speed,
2308                                                      &m_cmng_port);
2309         }
2310 }
2311
2312 static void bnx2x__link_status_update(struct bnx2x *bp)
2313 {
2314         if (bp->state != BNX2X_STATE_OPEN)
2315                 return;
2316
2317         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2318
2319         if (bp->link_vars.link_up)
2320                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2321         else
2322                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2323
2324         /* indicate link status */
2325         bnx2x_link_report(bp);
2326 }
2327
2328 static void bnx2x_pmf_update(struct bnx2x *bp)
2329 {
2330         int port = BP_PORT(bp);
2331         u32 val;
2332
2333         bp->port.pmf = 1;
2334         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2335
2336         /* enable nig attention */
2337         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2338         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2339         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2340
2341         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2342 }
2343
2344 /* end of Link */
2345
2346 /* slow path */
2347
2348 /*
2349  * General service functions
2350  */
2351
2352 /* the slow path queue is odd since completions arrive on the fastpath ring */
2353 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2354                          u32 data_hi, u32 data_lo, int common)
2355 {
2356         int func = BP_FUNC(bp);
2357
2358         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2359            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2360            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2361            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2362            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2363
2364 #ifdef BNX2X_STOP_ON_ERROR
2365         if (unlikely(bp->panic))
2366                 return -EIO;
2367 #endif
2368
2369         spin_lock_bh(&bp->spq_lock);
2370
2371         if (!bp->spq_left) {
2372                 BNX2X_ERR("BUG! SPQ ring full!\n");
2373                 spin_unlock_bh(&bp->spq_lock);
2374                 bnx2x_panic();
2375                 return -EBUSY;
2376         }
2377
2378         /* CID needs the port number to be encoded in it */
2379         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2380                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2381                                      HW_CID(bp, cid)));
2382         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2383         if (common)
2384                 bp->spq_prod_bd->hdr.type |=
2385                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2386
2387         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2388         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2389
2390         bp->spq_left--;
2391
2392         if (bp->spq_prod_bd == bp->spq_last_bd) {
2393                 bp->spq_prod_bd = bp->spq;
2394                 bp->spq_prod_idx = 0;
2395                 DP(NETIF_MSG_TIMER, "end of spq\n");
2396
2397         } else {
2398                 bp->spq_prod_bd++;
2399                 bp->spq_prod_idx++;
2400         }
2401
2402         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2403                bp->spq_prod_idx);
2404
2405         spin_unlock_bh(&bp->spq_lock);
2406         return 0;
2407 }
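/* Illustrative call, with hypothetical cid and mapping (the command id is
   the SET_MAC ramrod used elsewhere in this driver): post a ramrod whose
   completion will arrive on the fastpath ring.

        bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                      U64_HI(config_mapping), U64_LO(config_mapping), 0);
*/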
2408
2409 /* acquire split MCP access lock register */
2410 static int bnx2x_acquire_alr(struct bnx2x *bp)
2411 {
2412         u32 i, j, val;
2413         int rc = 0;
2414
2415         might_sleep();
2416         i = 100;
2417         for (j = 0; j < i*10; j++) {
2418                 val = (1UL << 31);
2419                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2421                 if (val & (1L << 31))
2422                         break;
2423
2424                 msleep(5);
2425         }
2426         if (!(val & (1L << 31))) {
2427                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2428                 rc = -EBUSY;
2429         }
2430
2431         return rc;
2432 }
2433
2434 /* release split MCP access lock register */
2435 static void bnx2x_release_alr(struct bnx2x *bp)
2436 {
2437         u32 val = 0;
2438
2439         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2440 }
2441
2442 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2443 {
2444         struct host_def_status_block *def_sb = bp->def_status_blk;
2445         u16 rc = 0;
2446
2447         barrier(); /* status block is written to by the chip */
2448         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2449                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2450                 rc |= 1;
2451         }
2452         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2453                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2454                 rc |= 2;
2455         }
2456         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2457                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2458                 rc |= 4;
2459         }
2460         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2461                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2462                 rc |= 8;
2463         }
2464         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2465                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2466                 rc |= 16;
2467         }
2468         return rc;
2469 }
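/* The return value is a bitmask of which def-SB indices have moved:
   1 - attention bits, 2 - CSTORM, 4 - USTORM, 8 - XSTORM, 16 - TSTORM
   (mirroring the rc |= values above) */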
2470
2471 /*
2472  * slow path service functions
2473  */
2474
2475 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2476 {
2477         int port = BP_PORT(bp);
2478         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2479                        COMMAND_REG_ATTN_BITS_SET);
2480         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2481                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2482         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2483                                        NIG_REG_MASK_INTERRUPT_PORT0;
2484         u32 aeu_mask;
2485
2486         if (bp->attn_state & asserted)
2487                 BNX2X_ERR("IGU ERROR\n");
2488
2489         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490         aeu_mask = REG_RD(bp, aeu_addr);
2491
2492         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2493            aeu_mask, asserted);
2494         aeu_mask &= ~(asserted & 0xff);
2495         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2496
2497         REG_WR(bp, aeu_addr, aeu_mask);
2498         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2499
2500         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2501         bp->attn_state |= asserted;
2502         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2503
2504         if (asserted & ATTN_HARD_WIRED_MASK) {
2505                 if (asserted & ATTN_NIG_FOR_FUNC) {
2506
2507                         bnx2x_acquire_phy_lock(bp);
2508
2509                         /* save nig interrupt mask */
2510                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2511                         REG_WR(bp, nig_int_mask_addr, 0);
2512
2513                         bnx2x_link_attn(bp);
2514
2515                         /* handle unicore attn? */
2516                 }
2517                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2518                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2519
2520                 if (asserted & GPIO_2_FUNC)
2521                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2522
2523                 if (asserted & GPIO_3_FUNC)
2524                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2525
2526                 if (asserted & GPIO_4_FUNC)
2527                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2528
2529                 if (port == 0) {
2530                         if (asserted & ATTN_GENERAL_ATTN_1) {
2531                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2532                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2533                         }
2534                         if (asserted & ATTN_GENERAL_ATTN_2) {
2535                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2536                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2537                         }
2538                         if (asserted & ATTN_GENERAL_ATTN_3) {
2539                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2540                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2541                         }
2542                 } else {
2543                         if (asserted & ATTN_GENERAL_ATTN_4) {
2544                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2545                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2546                         }
2547                         if (asserted & ATTN_GENERAL_ATTN_5) {
2548                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2549                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2550                         }
2551                         if (asserted & ATTN_GENERAL_ATTN_6) {
2552                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2553                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554                         }
2555                 }
2556
2557         } /* if hardwired */
2558
2559         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2560            asserted, hc_addr);
2561         REG_WR(bp, hc_addr, asserted);
2562
2563         /* now set back the mask */
2564         if (asserted & ATTN_NIG_FOR_FUNC) {
2565                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2566                 bnx2x_release_phy_lock(bp);
2567         }
2568 }
2569
2570 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2571 {
2572         int port = BP_PORT(bp);
2573         int reg_offset;
2574         u32 val;
2575
2576         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2577                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2578
2579         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2580
2581                 val = REG_RD(bp, reg_offset);
2582                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2583                 REG_WR(bp, reg_offset, val);
2584
2585                 BNX2X_ERR("SPIO5 hw attention\n");
2586
2587                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2588                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2589                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2590                         /* Fan failure attention */
2591
2592                         /* The PHY reset is controlled by GPIO 1 */
2593                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2594                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2595                         /* Low power mode is controlled by GPIO 2 */
2596                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2597                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2598                         /* mark the failure */
2599                         bp->link_params.ext_phy_config &=
2600                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2601                         bp->link_params.ext_phy_config |=
2602                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2603                         SHMEM_WR(bp,
2604                                  dev_info.port_hw_config[port].
2605                                                         external_phy_config,
2606                                  bp->link_params.ext_phy_config);
2607                         /* log the failure */
2608                         printk(KERN_ERR PFX "Fan Failure on Network"
2609                                " Controller %s has caused the driver to"
2610                                " shutdown the card to prevent permanent"
2611                                " damage.  Please contact Dell Support for"
2612                                " assistance\n", bp->dev->name);
2613                         break;
2614
2615                 default:
2616                         break;
2617                 }
2618         }
2619
2620         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2621
2622                 val = REG_RD(bp, reg_offset);
2623                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2624                 REG_WR(bp, reg_offset, val);
2625
2626                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2627                           (attn & HW_INTERRUT_ASSERT_SET_0));
2628                 bnx2x_panic();
2629         }
2630 }
2631
2632 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2633 {
2634         u32 val;
2635
2636         if (attn & BNX2X_DOORQ_ASSERT) {
2637
2638                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2639                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2640                 /* DORQ discard attention */
2641                 if (val & 0x2)
2642                         BNX2X_ERR("FATAL error from DORQ\n");
2643         }
2644
2645         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2646
2647                 int port = BP_PORT(bp);
2648                 int reg_offset;
2649
2650                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2651                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2652
2653                 val = REG_RD(bp, reg_offset);
2654                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2655                 REG_WR(bp, reg_offset, val);
2656
2657                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2658                           (attn & HW_INTERRUT_ASSERT_SET_1));
2659                 bnx2x_panic();
2660         }
2661 }
2662
2663 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2664 {
2665         u32 val;
2666
2667         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2668
2669                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2670                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2671                 /* CFC error attention */
2672                 if (val & 0x2)
2673                         BNX2X_ERR("FATAL error from CFC\n");
2674         }
2675
2676         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2677
2678                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2679                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2680                 /* RQ_USDMDP_FIFO_OVERFLOW */
2681                 if (val & 0x18000)
2682                         BNX2X_ERR("FATAL error from PXP\n");
2683         }
2684
2685         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2686
2687                 int port = BP_PORT(bp);
2688                 int reg_offset;
2689
2690                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2691                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2692
2693                 val = REG_RD(bp, reg_offset);
2694                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2695                 REG_WR(bp, reg_offset, val);
2696
2697                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2698                           (attn & HW_INTERRUT_ASSERT_SET_2));
2699                 bnx2x_panic();
2700         }
2701 }
2702
2703 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2704 {
2705         u32 val;
2706
2707         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2708
2709                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2710                         int func = BP_FUNC(bp);
2711
2712                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2713                         bnx2x__link_status_update(bp);
2714                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2715                                                         DRV_STATUS_PMF)
2716                                 bnx2x_pmf_update(bp);
2717
2718                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2719
2720                         BNX2X_ERR("MC assert!\n");
2721                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2722                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2723                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2724                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2725                         bnx2x_panic();
2726
2727                 } else if (attn & BNX2X_MCP_ASSERT) {
2728
2729                         BNX2X_ERR("MCP assert!\n");
2730                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2731                         bnx2x_fw_dump(bp);
2732
2733                 } else
2734                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2735         }
2736
2737         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2738                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2739                 if (attn & BNX2X_GRC_TIMEOUT) {
2740                         val = CHIP_IS_E1H(bp) ?
2741                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2742                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2743                 }
2744                 if (attn & BNX2X_GRC_RSV) {
2745                         val = CHIP_IS_E1H(bp) ?
2746                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2747                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2748                 }
2749                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2750         }
2751 }
2752
2753 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2754 {
2755         struct attn_route attn;
2756         struct attn_route group_mask;
2757         int port = BP_PORT(bp);
2758         int index;
2759         u32 reg_addr;
2760         u32 val;
2761         u32 aeu_mask;
2762
2763         /* need to take the HW lock because the MCP or the other port
2764            might also try to handle this event */
2765         bnx2x_acquire_alr(bp);
2766
2767         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2768         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2769         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2770         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2771         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2772            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2773
2774         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2775                 if (deasserted & (1 << index)) {
2776                         group_mask = bp->attn_group[index];
2777
2778                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2779                            index, group_mask.sig[0], group_mask.sig[1],
2780                            group_mask.sig[2], group_mask.sig[3]);
2781
2782                         bnx2x_attn_int_deasserted3(bp,
2783                                         attn.sig[3] & group_mask.sig[3]);
2784                         bnx2x_attn_int_deasserted1(bp,
2785                                         attn.sig[1] & group_mask.sig[1]);
2786                         bnx2x_attn_int_deasserted2(bp,
2787                                         attn.sig[2] & group_mask.sig[2]);
2788                         bnx2x_attn_int_deasserted0(bp,
2789                                         attn.sig[0] & group_mask.sig[0]);
2790
2791                         if ((attn.sig[0] & group_mask.sig[0] &
2792                                                 HW_PRTY_ASSERT_SET_0) ||
2793                             (attn.sig[1] & group_mask.sig[1] &
2794                                                 HW_PRTY_ASSERT_SET_1) ||
2795                             (attn.sig[2] & group_mask.sig[2] &
2796                                                 HW_PRTY_ASSERT_SET_2))
2797                                 BNX2X_ERR("FATAL HW block parity attention\n");
2798                 }
2799         }
2800
2801         bnx2x_release_alr(bp);
2802
2803         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2804
2805         val = ~deasserted;
2806         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2807            val, reg_addr);
2808         REG_WR(bp, reg_addr, val);
2809
2810         if (~bp->attn_state & deasserted)
2811                 BNX2X_ERR("IGU ERROR\n");
2812
2813         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2814                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2815
2816         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2817         aeu_mask = REG_RD(bp, reg_addr);
2818
2819         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2820            aeu_mask, deasserted);
2821         aeu_mask |= (deasserted & 0xff);
2822         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2823
2824         REG_WR(bp, reg_addr, aeu_mask);
2825         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2826
2827         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2828         bp->attn_state &= ~deasserted;
2829         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2830 }
2831
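/*
 * Derive attention transitions from the default status block: a bit is
 * newly asserted when it is set in attn_bits but present in neither
 * attn_ack nor our attn_state, and newly deasserted when it is clear in
 * attn_bits but still present in both.  The sanity check below flags any
 * bit where attn_bits and attn_ack agree yet differ from attn_state.
 */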
2832 static void bnx2x_attn_int(struct bnx2x *bp)
2833 {
2834         /* read local copy of bits */
2835         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2836                                                                 attn_bits);
2837         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2838                                                                 attn_bits_ack);
2839         u32 attn_state = bp->attn_state;
2840
2841         /* look for changed bits */
2842         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2843         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2844
2845         DP(NETIF_MSG_HW,
2846            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2847            attn_bits, attn_ack, asserted, deasserted);
2848
2849         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2850                 BNX2X_ERR("BAD attention state\n");
2851
2852         /* handle bits that were raised */
2853         if (asserted)
2854                 bnx2x_attn_int_asserted(bp, asserted);
2855
2856         if (deasserted)
2857                 bnx2x_attn_int_deasserted(bp, deasserted);
2858 }
2859
2860 static void bnx2x_sp_task(struct work_struct *work)
2861 {
2862         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2863         u16 status;
2864
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2869                 return;
2870         }
2871
2872         status = bnx2x_update_dsb_idx(bp);
2873 /*      if (status == 0)                                     */
2874 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2875
2876         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2877
2878         /* HW attentions */
2879         if (status & 0x1)
2880                 bnx2x_attn_int(bp);
2881
2882         /* CStorm events: query_stats, port delete ramrod */
2883         if (status & 0x2)
2884                 bp->stats_pending = 0;
2885
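        /* ack the updated default status block indices; only the last ack
         * (IGU_INT_ENABLE) re-enables the interrupt line from the IGU
         */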
2886         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2887                      IGU_INT_NOP, 1);
2888         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2889                      IGU_INT_NOP, 1);
2890         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2891                      IGU_INT_NOP, 1);
2892         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2893                      IGU_INT_NOP, 1);
2894         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2895                      IGU_INT_ENABLE, 1);
2896
2898
2899 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2900 {
2901         struct net_device *dev = dev_instance;
2902         struct bnx2x *bp = netdev_priv(dev);
2903
2904         /* Return here if interrupt is disabled */
2905         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2906                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2907                 return IRQ_HANDLED;
2908         }
2909
2910         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2911
2912 #ifdef BNX2X_STOP_ON_ERROR
2913         if (unlikely(bp->panic))
2914                 return IRQ_HANDLED;
2915 #endif
2916
2917         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2918
2919         return IRQ_HANDLED;
2920 }
2921
2922 /* end of slow path */
2923
2924 /* Statistics */
2925
2926 /****************************************************************************
2927 * Macros
2928 ****************************************************************************/
2929
2930 /* sum[hi:lo] += add[hi:lo] */
2931 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2932         do { \
2933                 s_lo += a_lo; \
2934                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2935         } while (0)
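/*
 * e.g. 0x00000001ffffffff + 2: s_lo wraps to 0x00000001, which is less
 * than a_lo, so the carry bumps s_hi to 2, giving 0x0000000200000001.
 */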
2936
2937 /* difference = minuend - subtrahend */
2938 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2939         do { \
2940                 if (m_lo < s_lo) { \
2941                         /* underflow */ \
2942                         d_hi = m_hi - s_hi; \
2943                         if (d_hi > 0) { \
2944                                 /* we can 'loan' 1 */ \
2945                                 d_hi--; \
2946                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2947                         } else { \
2948                                 /* m_hi <= s_hi */ \
2949                                 d_hi = 0; \
2950                                 d_lo = 0; \
2951                         } \
2952                 } else { \
2953                         /* m_lo >= s_lo */ \
2954                         if (m_hi < s_hi) { \
2955                                 d_hi = 0; \
2956                                 d_lo = 0; \
2957                         } else { \
2958                                 /* m_hi >= s_hi */ \
2959                                 d_hi = m_hi - s_hi; \
2960                                 d_lo = m_lo - s_lo; \
2961                         } \
2962                 } \
2963         } while (0)
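/*
 * e.g. m = 0x0000000100000000, s = 1: m_lo < s_lo, but d_hi starts out
 * positive so one is borrowed, giving d = 0x00000000ffffffff.  If the
 * subtrahend exceeds the minuend altogether, the result is clamped to
 * zero instead of being allowed to underflow.
 */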
2964
2965 #define UPDATE_STAT64(s, t) \
2966         do { \
2967                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2968                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2969                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2970                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2971                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2972                        pstats->mac_stx[1].t##_lo, diff.lo); \
2973         } while (0)
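/*
 * mac_stx[0] caches the raw value last read from the MAC so the next
 * delta can be computed; mac_stx[1] accumulates those deltas into the
 * 64-bit running totals that are reported upwards.
 */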
2974
2975 #define UPDATE_STAT64_NIG(s, t) \
2976         do { \
2977                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2978                         diff.lo, new->s##_lo, old->s##_lo); \
2979                 ADD_64(estats->t##_hi, diff.hi, \
2980                        estats->t##_lo, diff.lo); \
2981         } while (0)
2982
2983 /* sum[hi:lo] += add */
2984 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2985         do { \
2986                 s_lo += a; \
2987                 s_hi += (s_lo < a) ? 1 : 0; \
2988         } while (0)
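/* e.g. s_lo = 0xfffffff0, a = 0x20: s_lo wraps to 0x10 < a, so s_hi++ */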
2989
2990 #define UPDATE_EXTEND_STAT(s) \
2991         do { \
2992                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2993                               pstats->mac_stx[1].s##_lo, \
2994                               new->s); \
2995         } while (0)
2996
2997 #define UPDATE_EXTEND_TSTAT(s, t) \
2998         do { \
2999                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3000                 old_tclient->s = le32_to_cpu(tclient->s); \
3001                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3002         } while (0)
3003
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3005         do { \
3006                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007                 old_xclient->s = le32_to_cpu(xclient->s); \
3008                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3009         } while (0)
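/*
 * The storm firmware exports only 32-bit counters; the two macros above
 * diff each new reading against the cached old one (unsigned arithmetic
 * keeps the diff correct across a single 32-bit wraparound) and extend
 * the result into the 64-bit fstats fields.
 */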
3010
3011 /*
3012  * General service functions
3013  */
3014
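/*
 * On 32-bit builds only the low dword is returned: the long return type
 * cannot hold the high half, and the net_device_stats counters this
 * feeds are unsigned long anyway.
 */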
3015 static inline long bnx2x_hilo(u32 *hiref)
3016 {
3017         u32 lo = *(hiref + 1);
3018 #if (BITS_PER_LONG == 64)
3019         u32 hi = *hiref;
3020
3021         return HILO_U64(hi, lo);
3022 #else
3023         return lo;
3024 #endif
3025 }
3026
3027 /*
3028  * Init service functions
3029  */
3030
3031 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3032 {
3033         if (!bp->stats_pending) {
3034                 struct eth_query_ramrod_data ramrod_data = {0};
3035                 int rc;
3036
3037                 ramrod_data.drv_counter = bp->stats_counter++;
3038                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3039                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3040
3041                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3042                                    ((u32 *)&ramrod_data)[1],
3043                                    ((u32 *)&ramrod_data)[0], 0);
3044                 if (rc == 0) {
3045                         /* stats ramrod has its own slot on the spq */
3046                         bp->spq_left++;
3047                         bp->stats_pending = 1;
3048                 }
3049         }
3050 }
3051
3052 static void bnx2x_stats_init(struct bnx2x *bp)
3053 {
3054         int port = BP_PORT(bp);
3055
3056         bp->executer_idx = 0;
3057         bp->stats_counter = 0;
3058
3059         /* port stats */
3060         if (!BP_NOMCP(bp))
3061                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3062         else
3063                 bp->port.port_stx = 0;
3064         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3065
3066         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3067         bp->port.old_nig_stats.brb_discard =
3068                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3069         bp->port.old_nig_stats.brb_truncate =
3070                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3071         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3072                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3073         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3074                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3075
3076         /* function stats */
3077         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3078         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3079         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3080         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3081
3082         bp->stats_state = STATS_STATE_DISABLED;
3083         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3084                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3085 }
3086
3087 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3088 {
3089         struct dmae_command *dmae = &bp->stats_dmae;
3090         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091
3092         *stats_comp = DMAE_COMP_VAL;
3093
3094         /* loader */
3095         if (bp->executer_idx) {
3096                 int loader_idx = PMF_DMAE_C(bp);
3097
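                /*
                 * The loader command DMA-copies a command from the slowpath
                 * dmae[] array into DMAE command memory at slot
                 * loader_idx + 1, and its completion writes 1 to that
                 * slot's GO register so the copied command executes in
                 * turn.  The sub-commands built by the stats-init routines
                 * complete back to the loader's own GO register, except
                 * for the last one, which writes DMAE_COMP_VAL to
                 * stats_comp so that bnx2x_stats_comp() can poll for the
                 * end of the chain.
                 */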
3098                 memset(dmae, 0, sizeof(struct dmae_command));
3099
3100                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3101                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3102                                 DMAE_CMD_DST_RESET |
3103 #ifdef __BIG_ENDIAN
3104                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3105 #else
3106                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3107 #endif
3108                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3109                                                DMAE_CMD_PORT_0) |
3110                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3111                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3112                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3113                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3114                                      sizeof(struct dmae_command) *
3115                                      (loader_idx + 1)) >> 2;
3116                 dmae->dst_addr_hi = 0;
3117                 dmae->len = sizeof(struct dmae_command) >> 2;
3118                 if (CHIP_IS_E1(bp))
3119                         dmae->len--;
3120                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3121                 dmae->comp_addr_hi = 0;
3122                 dmae->comp_val = 1;
3123
3124                 *stats_comp = 0;
3125                 bnx2x_post_dmae(bp, dmae, loader_idx);
3126
3127         } else if (bp->func_stx) {
3128                 *stats_comp = 0;
3129                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3130         }
3131 }
3132
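/*
 * Poll for the DMAE completion value for at most ~10 ms (10 x 1 ms
 * sleeps); on timeout an error is logged but the function still
 * returns 1.
 */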
3133 static int bnx2x_stats_comp(struct bnx2x *bp)
3134 {
3135         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3136         int cnt = 10;
3137
3138         might_sleep();
3139         while (*stats_comp != DMAE_COMP_VAL) {
3140                 if (!cnt) {
3141                         BNX2X_ERR("timeout waiting for stats to finish\n");
3142                         break;
3143                 }
3144                 cnt--;
3145                 msleep(1);
3146         }
3147         return 1;
3148 }
3149
3150 /*
3151  * Statistics service functions
3152  */
3153
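/*
 * On becoming the PMF, pull the port statistics accumulated so far from
 * the port_stx area (whose address was read from shmem) into the local
 * port_stats buffer.  The copy is split into two DMAE commands because a
 * single read is capped at DMAE_LEN32_RD_MAX dwords.
 */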
3154 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3155 {
3156         struct dmae_command *dmae;
3157         u32 opcode;
3158         int loader_idx = PMF_DMAE_C(bp);
3159         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3160
3161         /* sanity */
3162         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3163                 BNX2X_ERR("BUG!\n");
3164                 return;
3165         }
3166
3167         bp->executer_idx = 0;
3168
3169         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3170                   DMAE_CMD_C_ENABLE |
3171                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3172 #ifdef __BIG_ENDIAN
3173                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3174 #else
3175                   DMAE_CMD_ENDIANITY_DW_SWAP |
3176 #endif
3177                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3178                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3179
3180         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3182         dmae->src_addr_lo = bp->port.port_stx >> 2;
3183         dmae->src_addr_hi = 0;
3184         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3185         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3186         dmae->len = DMAE_LEN32_RD_MAX;
3187         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188         dmae->comp_addr_hi = 0;
3189         dmae->comp_val = 1;
3190
3191         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3192         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3193         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3194         dmae->src_addr_hi = 0;
3195         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3196                                    DMAE_LEN32_RD_MAX * 4);
3197         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3198                                    DMAE_LEN32_RD_MAX * 4);
3199         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3200         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3201         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3202         dmae->comp_val = DMAE_COMP_VAL;
3203
3204         *stats_comp = 0;
3205         bnx2x_hw_stats_post(bp);
3206         bnx2x_stats_comp(bp);
3207 }
3208
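/*
 * Build the PMF statistics DMAE chain: host port and function stats are
 * written out to the port_stx/func_stx areas for the MCP, the active
 * MAC's (BMAC or EMAC) hardware counters and the NIG counters are read
 * back into host memory, and the final command completes to stats_comp.
 */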
3209 static void bnx2x_port_stats_init(struct bnx2x *bp)
3210 {
3211         struct dmae_command *dmae;
3212         int port = BP_PORT(bp);
3213         int vn = BP_E1HVN(bp);
3214         u32 opcode;
3215         int loader_idx = PMF_DMAE_C(bp);
3216         u32 mac_addr;
3217         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3218
3219         /* sanity */
3220         if (!bp->link_vars.link_up || !bp->port.pmf) {
3221                 BNX2X_ERR("BUG!\n");
3222                 return;
3223         }
3224
3225         bp->executer_idx = 0;
3226
3227         /* MCP */
3228         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3229                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3230                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3231 #ifdef __BIG_ENDIAN
3232                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3233 #else
3234                   DMAE_CMD_ENDIANITY_DW_SWAP |
3235 #endif
3236                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3237                   (vn << DMAE_CMD_E1HVN_SHIFT));
3238
3239         if (bp->port.port_stx) {
3240
3241                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242                 dmae->opcode = opcode;
3243                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3244                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3245                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3246                 dmae->dst_addr_hi = 0;
3247                 dmae->len = sizeof(struct host_port_stats) >> 2;
3248                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249                 dmae->comp_addr_hi = 0;
3250                 dmae->comp_val = 1;
3251         }
3252
3253         if (bp->func_stx) {
3254
3255                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3256                 dmae->opcode = opcode;
3257                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3258                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3259                 dmae->dst_addr_lo = bp->func_stx >> 2;
3260                 dmae->dst_addr_hi = 0;
3261                 dmae->len = sizeof(struct host_func_stats) >> 2;
3262                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3263                 dmae->comp_addr_hi = 0;
3264                 dmae->comp_val = 1;
3265         }
3266
3267         /* MAC */
3268         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3269                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3270                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3271 #ifdef __BIG_ENDIAN
3272                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3273 #else
3274                   DMAE_CMD_ENDIANITY_DW_SWAP |
3275 #endif
3276                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3277                   (vn << DMAE_CMD_E1HVN_SHIFT));
3278
3279         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3280
3281                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3282                                    NIG_REG_INGRESS_BMAC0_MEM);
3283
3284                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3286                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3287                 dmae->opcode = opcode;
3288                 dmae->src_addr_lo = (mac_addr +
3289                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3290                 dmae->src_addr_hi = 0;
3291                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3292                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3293                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3294                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3295                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296                 dmae->comp_addr_hi = 0;
3297                 dmae->comp_val = 1;
3298
3299                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3302                 dmae->opcode = opcode;
3303                 dmae->src_addr_lo = (mac_addr +
3304                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3305                 dmae->src_addr_hi = 0;
3306                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3307                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3308                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3309                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3310                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3311                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313                 dmae->comp_addr_hi = 0;
3314                 dmae->comp_val = 1;
3315
3316         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3317
3318                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3319
3320                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322                 dmae->opcode = opcode;
3323                 dmae->src_addr_lo = (mac_addr +
3324                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3325                 dmae->src_addr_hi = 0;
3326                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3327                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3328                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3329                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330                 dmae->comp_addr_hi = 0;
3331                 dmae->comp_val = 1;
3332
3333                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335                 dmae->opcode = opcode;
3336                 dmae->src_addr_lo = (mac_addr +
3337                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3338                 dmae->src_addr_hi = 0;
3339                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3341                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3343                 dmae->len = 1;
3344                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345                 dmae->comp_addr_hi = 0;
3346                 dmae->comp_val = 1;
3347
3348                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350                 dmae->opcode = opcode;
3351                 dmae->src_addr_lo = (mac_addr +
3352                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3353                 dmae->src_addr_hi = 0;
3354                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3355                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3356                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3357                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3358                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3359                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3360                 dmae->comp_addr_hi = 0;
3361                 dmae->comp_val = 1;
3362         }
3363
3364         /* NIG */
3365         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3366         dmae->opcode = opcode;
3367         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3368                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3369         dmae->src_addr_hi = 0;
3370         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3371         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3372         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3373         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3374         dmae->comp_addr_hi = 0;
3375         dmae->comp_val = 1;
3376
3377         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3378         dmae->opcode = opcode;
3379         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3380                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3381         dmae->src_addr_hi = 0;
3382         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3383                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3384         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3385                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3386         dmae->len = (2*sizeof(u32)) >> 2;
3387         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3388         dmae->comp_addr_hi = 0;
3389         dmae->comp_val = 1;
3390
3391         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3393                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3394                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3395 #ifdef __BIG_ENDIAN
3396                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3397 #else
3398                         DMAE_CMD_ENDIANITY_DW_SWAP |
3399 #endif
3400                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3401                         (vn << DMAE_CMD_E1HVN_SHIFT));
3402         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3403                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3404         dmae->src_addr_hi = 0;
3405         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3406                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3407         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3408                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3409         dmae->len = (2*sizeof(u32)) >> 2;
3410         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3411         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3412         dmae->comp_val = DMAE_COMP_VAL;
3413
3414         *stats_comp = 0;
3415 }
3416
3417 static void bnx2x_func_stats_init(struct bnx2x *bp)
3418 {
3419         struct dmae_command *dmae = &bp->stats_dmae;
3420         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3421
3422         /* sanity */
3423         if (!bp->func_stx) {
3424                 BNX2X_ERR("BUG!\n");
3425                 return;
3426         }
3427
3428         bp->executer_idx = 0;
3429         memset(dmae, 0, sizeof(struct dmae_command));
3430
3431         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3432                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3434 #ifdef __BIG_ENDIAN
3435                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3436 #else
3437                         DMAE_CMD_ENDIANITY_DW_SWAP |
3438 #endif
3439                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3441         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3442         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3443         dmae->dst_addr_lo = bp->func_stx >> 2;
3444         dmae->dst_addr_hi = 0;
3445         dmae->len = sizeof(struct host_func_stats) >> 2;
3446         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3447         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3448         dmae->comp_val = DMAE_COMP_VAL;
3449
3450         *stats_comp = 0;
3451 }
3452
3453 static void bnx2x_stats_start(struct bnx2x *bp)
3454 {
3455         if (bp->port.pmf)
3456                 bnx2x_port_stats_init(bp);
3457
3458         else if (bp->func_stx)
3459                 bnx2x_func_stats_init(bp);
3460
3461         bnx2x_hw_stats_post(bp);
3462         bnx2x_storm_stats_post(bp);
3463 }
3464
3465 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3466 {
3467         bnx2x_stats_comp(bp);
3468         bnx2x_stats_pmf_update(bp);
3469         bnx2x_stats_start(bp);
3470 }
3471
3472 static void bnx2x_stats_restart(struct bnx2x *bp)
3473 {
3474         bnx2x_stats_comp(bp);
3475         bnx2x_stats_start(bp);
3476 }
3477
3478 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3479 {
3480         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3481         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482         struct regpair diff;
3483
3484         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3485         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3486         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3487         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3488         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3489         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3490         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3491         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3492         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3493         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3494         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3495         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3496         UPDATE_STAT64(tx_stat_gt127,
3497                                 tx_stat_etherstatspkts65octetsto127octets);
3498         UPDATE_STAT64(tx_stat_gt255,
3499                                 tx_stat_etherstatspkts128octetsto255octets);
3500         UPDATE_STAT64(tx_stat_gt511,
3501                                 tx_stat_etherstatspkts256octetsto511octets);
3502         UPDATE_STAT64(tx_stat_gt1023,
3503                                 tx_stat_etherstatspkts512octetsto1023octets);
3504         UPDATE_STAT64(tx_stat_gt1518,
3505                                 tx_stat_etherstatspkts1024octetsto1522octets);
3506         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3507         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3508         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3509         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3510         UPDATE_STAT64(tx_stat_gterr,
3511                                 tx_stat_dot3statsinternalmactransmiterrors);
3512         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3513 }
3514
3515 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3516 {
3517         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3518         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3519
3520         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3521         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3522         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3523         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3524         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3525         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3526         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3527         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3528         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3529         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3530         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3531         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3532         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3533         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3534         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3535         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3536         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3537         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3538         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3539         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3540         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3541         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3542         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3543         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3544         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3545         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3546         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3547         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3548         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3549         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3550         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3551 }
3552
3553 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3554 {
3555         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3556         struct nig_stats *old = &(bp->port.old_nig_stats);
3557         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3558         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3559         struct regpair diff;
3560
3561         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3562                 bnx2x_bmac_stats_update(bp);
3563
3564         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3565                 bnx2x_emac_stats_update(bp);
3566
3567         else { /* unreached */
3568                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3569                 return -1;
3570         }
3571
3572         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3573                       new->brb_discard - old->brb_discard);
3574         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3575                       new->brb_truncate - old->brb_truncate);
3576
3577         UPDATE_STAT64_NIG(egress_mac_pkt0,
3578                                         etherstatspkts1024octetsto1522octets);
3579         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3580
3581         memcpy(old, new, sizeof(struct nig_stats));
3582
3583         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3584                sizeof(struct mac_stx));
3585         estats->brb_drop_hi = pstats->brb_drop_hi;
3586         estats->brb_drop_lo = pstats->brb_drop_lo;
3587
3588         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3589
3590         return 0;
3591 }
3592
3593 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3594 {
3595         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3596         int cl_id = BP_CL_ID(bp);
3597         struct tstorm_per_port_stats *tport =
3598                                 &stats->tstorm_common.port_statistics;
3599         struct tstorm_per_client_stats *tclient =
3600                         &stats->tstorm_common.client_statistics[cl_id];
3601         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3602         struct xstorm_per_client_stats *xclient =
3603                         &stats->xstorm_common.client_statistics[cl_id];
3604         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3605         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3606         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3607         u32 diff;
3608
3609         /* are storm stats valid? */
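        /*
         * The query ramrod posted in bnx2x_storm_stats_post() carries the
         * previous value of bp->stats_counter (the counter is incremented
         * when the query is posted), so a storm that has processed the
         * latest query is expected to echo stats_counter - 1 here;
         * anything else means this snapshot is stale.
         */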
3610         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3611                                                         bp->stats_counter) {
3612                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3613                    "  tstorm counter (%d) != stats_counter (%d)\n",
3614                    tclient->stats_counter, bp->stats_counter);
3615                 return -1;
3616         }
3617         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3618                                                         bp->stats_counter) {
3619                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3620                    "  xstorm counter (%d) != stats_counter (%d)\n",
3621                    xclient->stats_counter, bp->stats_counter);
3622                 return -2;
3623         }
3624
3625         fstats->total_bytes_received_hi =
3626         fstats->valid_bytes_received_hi =
3627                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3628         fstats->total_bytes_received_lo =
3629         fstats->valid_bytes_received_lo =
3630                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3631
3632         estats->error_bytes_received_hi =
3633                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3634         estats->error_bytes_received_lo =
3635                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3636         ADD_64(estats->error_bytes_received_hi,
3637                estats->rx_stat_ifhcinbadoctets_hi,
3638                estats->error_bytes_received_lo,
3639                estats->rx_stat_ifhcinbadoctets_lo);
3640
3641         ADD_64(fstats->total_bytes_received_hi,
3642                estats->error_bytes_received_hi,
3643                fstats->total_bytes_received_lo,
3644                estats->error_bytes_received_lo);
3645
3646         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3647         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3648                                 total_multicast_packets_received);
3649         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3650                                 total_broadcast_packets_received);
3651
3652         fstats->total_bytes_transmitted_hi =
3653                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3654         fstats->total_bytes_transmitted_lo =
3655                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3656
3657         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3658                                 total_unicast_packets_transmitted);
3659         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3660                                 total_multicast_packets_transmitted);
3661         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3662                                 total_broadcast_packets_transmitted);
3663
3664         memcpy(estats, &(fstats->total_bytes_received_hi),
3665                sizeof(struct host_func_stats) - 2*sizeof(u32));
3666
3667         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3668         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3669         estats->brb_truncate_discard =
3670                                 le32_to_cpu(tport->brb_truncate_discard);
3671         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3672
3673         old_tclient->rcv_unicast_bytes.hi =
3674                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3675         old_tclient->rcv_unicast_bytes.lo =
3676                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3677         old_tclient->rcv_broadcast_bytes.hi =
3678                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3679         old_tclient->rcv_broadcast_bytes.lo =
3680                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3681         old_tclient->rcv_multicast_bytes.hi =
3682                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3683         old_tclient->rcv_multicast_bytes.lo =
3684                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3685         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3686
3687         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3688         old_tclient->packets_too_big_discard =
3689                                 le32_to_cpu(tclient->packets_too_big_discard);
3690         estats->no_buff_discard =
3691         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3692         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3693
3694         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3695         old_xclient->unicast_bytes_sent.hi =
3696                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3697         old_xclient->unicast_bytes_sent.lo =
3698                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3699         old_xclient->multicast_bytes_sent.hi =
3700                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3701         old_xclient->multicast_bytes_sent.lo =
3702                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3703         old_xclient->broadcast_bytes_sent.hi =
3704                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3705         old_xclient->broadcast_bytes_sent.lo =
3706                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3707
3708         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3709
3710         return 0;
3711 }
3712
3713 static void bnx2x_net_stats_update(struct bnx2x *bp)
3714 {
3715         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3716         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3717         struct net_device_stats *nstats = &bp->dev->stats;
3718
3719         nstats->rx_packets =
3720                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3721                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3722                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3723
3724         nstats->tx_packets =
3725                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3726                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3727                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3728
3729         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3730
3731         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3732
3733         nstats->rx_dropped = old_tclient->checksum_discard +
3734                              estats->mac_discard;
3735         nstats->tx_dropped = 0;
3736
3737         nstats->multicast =
3738                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3739
3740         nstats->collisions =
3741                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3742                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3743                         estats->tx_stat_dot3statslatecollisions_lo +
3744                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3745
3746         estats->jabber_packets_received =
3747                                 old_tclient->packets_too_big_discard +
3748                                 estats->rx_stat_dot3statsframestoolong_lo;
3749
3750         nstats->rx_length_errors =
3751                                 estats->rx_stat_etherstatsundersizepkts_lo +
3752                                 estats->jabber_packets_received;
3753         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3754         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3755         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3756         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3757         nstats->rx_missed_errors = estats->xxoverflow_discard;
3758
3759         nstats->rx_errors = nstats->rx_length_errors +
3760                             nstats->rx_over_errors +
3761                             nstats->rx_crc_errors +
3762                             nstats->rx_frame_errors +
3763                             nstats->rx_fifo_errors +
3764                             nstats->rx_missed_errors;
3765
3766         nstats->tx_aborted_errors =
3767                         estats->tx_stat_dot3statslatecollisions_lo +
3768                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3769         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3770         nstats->tx_fifo_errors = 0;
3771         nstats->tx_heartbeat_errors = 0;
3772         nstats->tx_window_errors = 0;
3773
3774         nstats->tx_errors = nstats->tx_aborted_errors +
3775                             nstats->tx_carrier_errors;
3776 }
3777
3778 static void bnx2x_stats_update(struct bnx2x *bp)
3779 {
3780         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3781         int update = 0;
3782
3783         if (*stats_comp != DMAE_COMP_VAL)
3784                 return;
3785
3786         if (bp->port.pmf)
3787                 update = (bnx2x_hw_stats_update(bp) == 0);
3788
3789         update |= (bnx2x_storm_stats_update(bp) == 0);
3790
3791         if (update)
3792                 bnx2x_net_stats_update(bp);
3793
3794         else {
3795                 if (bp->stats_pending) {
3796                         bp->stats_pending++;
3797                         if (bp->stats_pending == 3) {
3798                                 BNX2X_ERR("stats not updated 3 times in a row\n");
3799                                 bnx2x_panic();
3800                                 return;
3801                         }
3802                 }
3803         }
3804
3805         if (bp->msglevel & NETIF_MSG_TIMER) {
3806                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3807                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3808                 struct net_device_stats *nstats = &bp->dev->stats;
3809                 int i;
3810
3811                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3812                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3813                                   "  tx pkt (%lx)\n",
3814                        bnx2x_tx_avail(bp->fp),
3815                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3816                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3817                                   "  rx pkt (%lx)\n",
3818                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3819                              bp->fp->rx_comp_cons),
3820                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3821                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3822                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3823                        estats->driver_xoff, estats->brb_drop_lo);
3824                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3825                         "packets_too_big_discard %u  no_buff_discard %u  "
3826                         "mac_discard %u  mac_filter_discard %u  "
3827                         "xxovrflow_discard %u  brb_truncate_discard %u  "
3828                         "ttl0_discard %u\n",
3829                        old_tclient->checksum_discard,
3830                        old_tclient->packets_too_big_discard,
3831                        old_tclient->no_buff_discard, estats->mac_discard,
3832                        estats->mac_filter_discard, estats->xxoverflow_discard,
3833                        estats->brb_truncate_discard,
3834                        old_tclient->ttl0_discard);
3835
3836                 for_each_queue(bp, i) {
3837                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3838                                bnx2x_fp(bp, i, tx_pkt),
3839                                bnx2x_fp(bp, i, rx_pkt),
3840                                bnx2x_fp(bp, i, rx_calls));
3841                 }
3842         }
3843
3844         bnx2x_hw_stats_post(bp);
3845         bnx2x_storm_stats_post(bp);
3846 }
3847
3848 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3849 {
3850         struct dmae_command *dmae;
3851         u32 opcode;
3852         int loader_idx = PMF_DMAE_C(bp);
3853         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3854
3855         bp->executer_idx = 0;
3856
3857         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3858                   DMAE_CMD_C_ENABLE |
3859                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3860 #ifdef __BIG_ENDIAN
3861                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3862 #else
3863                   DMAE_CMD_ENDIANITY_DW_SWAP |
3864 #endif
3865                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3866                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3867
3868         if (bp->port.port_stx) {
3869
3870                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3871                 if (bp->func_stx)
3872                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3873                 else
3874                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3875                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3876                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3877                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3878                 dmae->dst_addr_hi = 0;
3879                 dmae->len = sizeof(struct host_port_stats) >> 2;
3880                 if (bp->func_stx) {
3881                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3882                         dmae->comp_addr_hi = 0;
3883                         dmae->comp_val = 1;
3884                 } else {
3885                         dmae->comp_addr_lo =
3886                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3887                         dmae->comp_addr_hi =
3888                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3889                         dmae->comp_val = DMAE_COMP_VAL;
3890
3891                         *stats_comp = 0;
3892                 }
3893         }
3894
3895         if (bp->func_stx) {
3896
3897                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3898                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3899                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3900                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3901                 dmae->dst_addr_lo = bp->func_stx >> 2;
3902                 dmae->dst_addr_hi = 0;
3903                 dmae->len = sizeof(struct host_func_stats) >> 2;
3904                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3906                 dmae->comp_val = DMAE_COMP_VAL;
3907
3908                 *stats_comp = 0;
3909         }
3910 }
3911
3912 static void bnx2x_stats_stop(struct bnx2x *bp)
3913 {
3914         int update = 0;
3915
3916         bnx2x_stats_comp(bp);
3917
3918         if (bp->port.pmf)
3919                 update = (bnx2x_hw_stats_update(bp) == 0);
3920
3921         update |= (bnx2x_storm_stats_update(bp) == 0);
3922
3923         if (update) {
3924                 bnx2x_net_stats_update(bp);
3925
3926                 if (bp->port.pmf)
3927                         bnx2x_port_stats_stop(bp);
3928
3929                 bnx2x_hw_stats_post(bp);
3930                 bnx2x_stats_comp(bp);
3931         }
3932 }
3933
3934 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3935 {
3936 }
3937
3938 static const struct {
3939         void (*action)(struct bnx2x *bp);
3940         enum bnx2x_stats_state next_state;
3941 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3942 /* state        event   */
3943 {
3944 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3945 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3946 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3947 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3948 },
3949 {
3950 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3951 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3952 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3953 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3954 }
3955 };
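/*
 * The table is indexed as [current state][event]; e.g. an UPDATE event
 * in STATS_STATE_ENABLED runs bnx2x_stats_update() and leaves the state
 * unchanged, while STOP runs bnx2x_stats_stop() and moves the machine
 * back to STATS_STATE_DISABLED.
 */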
3956
3957 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3958 {
3959         enum bnx2x_stats_state state = bp->stats_state;
3960
3961         bnx2x_stats_stm[state][event].action(bp);
3962         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3963
3964         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3965                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3966                    state, event, bp->stats_state);
3967 }
3968
3969 static void bnx2x_timer(unsigned long data)
3970 {
3971         struct bnx2x *bp = (struct bnx2x *) data;
3972
3973         if (!netif_running(bp->dev))
3974                 return;
3975
3976         if (atomic_read(&bp->intr_sem) != 0)
3977                 goto timer_restart;
3978
3979         if (poll) {
3980                 struct bnx2x_fastpath *fp = &bp->fp[0];
3981                 int rc;
3982
3983                 bnx2x_tx_int(fp, 1000);
3984                 rc = bnx2x_rx_int(fp, 1000);
3985         }
3986
3987         if (!BP_NOMCP(bp)) {
3988                 int func = BP_FUNC(bp);
3989                 u32 drv_pulse;
3990                 u32 mcp_pulse;
3991
3992                 ++bp->fw_drv_pulse_wr_seq;
3993                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3994                 /* TBD - add SYSTEM_TIME */
3995                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3996                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3997
3998                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3999                              MCP_PULSE_SEQ_MASK);
4000                 /* The delta between driver pulse and mcp response
4001                  * should be 1 (before mcp response) or 0 (after mcp response)
4002                  */
4003                 if ((drv_pulse != mcp_pulse) &&
4004                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4005                         /* someone lost a heartbeat... */
4006                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4007                                   drv_pulse, mcp_pulse);
4008                 }
4009         }
4010
4011         if ((bp->state == BNX2X_STATE_OPEN) ||
4012             (bp->state == BNX2X_STATE_DISABLED))
4013                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4014
4015 timer_restart:
4016         mod_timer(&bp->timer, jiffies + bp->current_interval);
4017 }
4018
4019 /* end of Statistics */
4020
4021 /* nic init */
4022
4023 /*
4024  * nic init service functions
4025  */
4026
4027 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4028 {
4029         int port = BP_PORT(bp);
4030
4031         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4032                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4033                         sizeof(struct ustorm_status_block)/4);
4034         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4035                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4036                         sizeof(struct cstorm_status_block)/4);
4037 }
4038
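/* Program the USTORM/CSTORM halves of a fastpath status block: point each
 * half at its host DMA address, tag it with the owning function, and start
 * with every host-coalescing index disabled - bnx2x_update_coalesce()
 * re-enables the indices that are actually used.
 */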
4039 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4040                           dma_addr_t mapping, int sb_id)
4041 {
4042         int port = BP_PORT(bp);
4043         int func = BP_FUNC(bp);
4044         int index;
4045         u64 section;
4046
4047         /* USTORM */
4048         section = ((u64)mapping) + offsetof(struct host_status_block,
4049                                             u_status_block);
4050         sb->u_status_block.status_block_id = sb_id;
4051
4052         REG_WR(bp, BAR_USTRORM_INTMEM +
4053                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4054         REG_WR(bp, BAR_USTRORM_INTMEM +
4055                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4056                U64_HI(section));
4057         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4058                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4059
4060         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4061                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4062                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4063
4064         /* CSTORM */
4065         section = ((u64)mapping) + offsetof(struct host_status_block,
4066                                             c_status_block);
4067         sb->c_status_block.status_block_id = sb_id;
4068
4069         REG_WR(bp, BAR_CSTRORM_INTMEM +
4070                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4071         REG_WR(bp, BAR_CSTRORM_INTMEM +
4072                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4073                U64_HI(section));
4074         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4075                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4076
4077         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4078                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4079                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4080
4081         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4082 }
4083
4084 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4085 {
4086         int func = BP_FUNC(bp);
4087
4088         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4089                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4090                         sizeof(struct ustorm_def_status_block)/4);
4091         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4092                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4093                         sizeof(struct cstorm_def_status_block)/4);
4094         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4095                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4096                         sizeof(struct xstorm_def_status_block)/4);
4097         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4098                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4099                         sizeof(struct tstorm_def_status_block)/4);
4100 }
4101
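/* The default status block carries the attention bits plus one index page
 * per storm (U/C/T/X).  Beyond the per-storm address setup (same pattern as
 * bnx2x_init_sb()), this latches the four attention-group signal masks from
 * the AEU and registers the SB id with the HC attention logic.
 */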
4102 static void bnx2x_init_def_sb(struct bnx2x *bp,
4103                               struct host_def_status_block *def_sb,
4104                               dma_addr_t mapping, int sb_id)
4105 {
4106         int port = BP_PORT(bp);
4107         int func = BP_FUNC(bp);
4108         int index, val, reg_offset;
4109         u64 section;
4110
4111         /* ATTN */
4112         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4113                                             atten_status_block);
4114         def_sb->atten_status_block.status_block_id = sb_id;
4115
4116         bp->attn_state = 0;
4117
4118         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4119                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4120
4121         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4122                 bp->attn_group[index].sig[0] = REG_RD(bp,
4123                                                      reg_offset + 0x10*index);
4124                 bp->attn_group[index].sig[1] = REG_RD(bp,
4125                                                reg_offset + 0x4 + 0x10*index);
4126                 bp->attn_group[index].sig[2] = REG_RD(bp,
4127                                                reg_offset + 0x8 + 0x10*index);
4128                 bp->attn_group[index].sig[3] = REG_RD(bp,
4129                                                reg_offset + 0xc + 0x10*index);
4130         }
4131
4132         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4133                              HC_REG_ATTN_MSG0_ADDR_L);
4134
4135         REG_WR(bp, reg_offset, U64_LO(section));
4136         REG_WR(bp, reg_offset + 4, U64_HI(section));
4137
4138         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4139
4140         val = REG_RD(bp, reg_offset);
4141         val |= sb_id;
4142         REG_WR(bp, reg_offset, val);
4143
4144         /* USTORM */
4145         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4146                                             u_def_status_block);
4147         def_sb->u_def_status_block.status_block_id = sb_id;
4148
4149         REG_WR(bp, BAR_USTRORM_INTMEM +
4150                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4151         REG_WR(bp, BAR_USTRORM_INTMEM +
4152                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4153                U64_HI(section));
4154         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4155                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4156
4157         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4158                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4159                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4160
4161         /* CSTORM */
4162         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4163                                             c_def_status_block);
4164         def_sb->c_def_status_block.status_block_id = sb_id;
4165
4166         REG_WR(bp, BAR_CSTRORM_INTMEM +
4167                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4168         REG_WR(bp, BAR_CSTRORM_INTMEM +
4169                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4170                U64_HI(section));
4171         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4172                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4173
4174         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4175                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4176                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4177
4178         /* TSTORM */
4179         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4180                                             t_def_status_block);
4181         def_sb->t_def_status_block.status_block_id = sb_id;
4182
4183         REG_WR(bp, BAR_TSTRORM_INTMEM +
4184                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4185         REG_WR(bp, BAR_TSTRORM_INTMEM +
4186                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4187                U64_HI(section));
4188         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4189                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4190
4191         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4192                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4193                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4194
4195         /* XSTORM */
4196         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4197                                             x_def_status_block);
4198         def_sb->x_def_status_block.status_block_id = sb_id;
4199
4200         REG_WR(bp, BAR_XSTRORM_INTMEM +
4201                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4202         REG_WR(bp, BAR_XSTRORM_INTMEM +
4203                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4204                U64_HI(section));
4205         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4206                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4207
4208         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4209                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4210                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4211
4212         bp->stats_pending = 0;
4213         bp->set_mac_pending = 0;
4214
4215         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4216 }
4217
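/* Program the host-coalescing timeouts for each queue's Rx and Tx completion
 * indices.  A zero rx_ticks/tx_ticks disables coalescing on that index (the
 * HC_DISABLE flag is set); the /12 presumably scales the microsecond value
 * into the units of the HC timeout registers.
 */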
4218 static void bnx2x_update_coalesce(struct bnx2x *bp)
4219 {
4220         int port = BP_PORT(bp);
4221         int i;
4222
4223         for_each_queue(bp, i) {
4224                 int sb_id = bp->fp[i].sb_id;
4225
4226                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4227                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4228                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4229                                                     U_SB_ETH_RX_CQ_INDEX),
4230                         bp->rx_ticks/12);
4231                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4232                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4233                                                      U_SB_ETH_RX_CQ_INDEX),
4234                          bp->rx_ticks ? 0 : 1);
4235                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4236                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4237                                                      U_SB_ETH_RX_BD_INDEX),
4238                          bp->rx_ticks ? 0 : 1);
4239
4240                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4241                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4242                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4243                                                     C_SB_ETH_TX_CQ_INDEX),
4244                         bp->tx_ticks/12);
4245                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4246                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4247                                                      C_SB_ETH_TX_CQ_INDEX),
4248                          bp->tx_ticks ? 0 : 1);
4249         }
4250 }
4251
4252 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4253                                        struct bnx2x_fastpath *fp, int last)
4254 {
4255         int i;
4256
4257         for (i = 0; i < last; i++) {
4258                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4259                 struct sk_buff *skb = rx_buf->skb;
4260
4261                 if (skb == NULL) {
4262                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4263                         continue;
4264                 }
4265
4266                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4267                         pci_unmap_single(bp->pdev,
4268                                          pci_unmap_addr(rx_buf, mapping),
4269                                          bp->rx_buf_size,
4270                                          PCI_DMA_FROMDEVICE);
4271
4272                 dev_kfree_skb(skb);
4273                 rx_buf->skb = NULL;
4274         }
4275 }
4276
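/* Bring up the Rx side of all queues: pre-allocate one skb per TPA
 * aggregation bin (TPA is disabled on a queue if allocation fails), chain
 * the "next page" elements of the SGE, BD and CQ rings, fill the rings with
 * buffers, and publish the initial producers to the chip.
 */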
4277 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4278 {
4279         int func = BP_FUNC(bp);
4280         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4281                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4282         u16 ring_prod, cqe_ring_prod;
4283         int i, j;
4284
4285         bp->rx_buf_size = bp->dev->mtu;
4286         bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4287                 BCM_RX_ETH_PAYLOAD_ALIGN;
4288
4289         if (bp->flags & TPA_ENABLE_FLAG) {
4290                 DP(NETIF_MSG_IFUP,
4291                    "rx_buf_size %d  effective_mtu %d\n",
4292                    bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4293
4294                 for_each_queue(bp, j) {
4295                         struct bnx2x_fastpath *fp = &bp->fp[j];
4296
4297                         for (i = 0; i < max_agg_queues; i++) {
4298                                 fp->tpa_pool[i].skb =
4299                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4300                                 if (!fp->tpa_pool[i].skb) {
4301                                         BNX2X_ERR("Failed to allocate TPA "
4302                                                   "skb pool for queue[%d] - "
4303                                                   "disabling TPA on this "
4304                                                   "queue!\n", j);
4305                                         bnx2x_free_tpa_pool(bp, fp, i);
4306                                         fp->disable_tpa = 1;
4307                                         break;
4308                                 }
4309                                 /* must be this queue's pool, not fp[0]'s */
4310                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4311                                                    mapping, 0);
4312                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4313                         }
4314                 }
4315         }
4316
4317         for_each_queue(bp, j) {
4318                 struct bnx2x_fastpath *fp = &bp->fp[j];
4319
4320                 fp->rx_bd_cons = 0;
4321                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4322                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4323
4324                 /* "next page" elements initialization */
4325                 /* SGE ring */
4326                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4327                         struct eth_rx_sge *sge;
4328
4329                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4330                         sge->addr_hi =
4331                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4332                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4333                         sge->addr_lo =
4334                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4335                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4336                 }
4337
4338                 bnx2x_init_sge_ring_bit_mask(fp);
4339
4340                 /* RX BD ring */
4341                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4342                         struct eth_rx_bd *rx_bd;
4343
4344                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4345                         rx_bd->addr_hi =
4346                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4347                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4348                         rx_bd->addr_lo =
4349                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4350                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4351                 }
4352
4353                 /* CQ ring */
4354                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4355                         struct eth_rx_cqe_next_page *nextpg;
4356
4357                         nextpg = (struct eth_rx_cqe_next_page *)
4358                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4359                         nextpg->addr_hi =
4360                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4361                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4362                         nextpg->addr_lo =
4363                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4364                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4365                 }
4366
4367                 /* Allocate SGEs and initialize the ring elements */
4368                 for (i = 0, ring_prod = 0;
4369                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4370
4371                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4372                                 BNX2X_ERR("was only able to allocate "
4373                                           "%d rx sges\n", i);
4374                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4375                                 /* Cleanup already allocated elements */
4376                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4377                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4378                                 fp->disable_tpa = 1;
4379                                 ring_prod = 0;
4380                                 break;
4381                         }
4382                         ring_prod = NEXT_SGE_IDX(ring_prod);
4383                 }
4384                 fp->rx_sge_prod = ring_prod;
4385
4386                 /* Allocate BDs and initialize BD ring */
4387                 fp->rx_comp_cons = 0;
4388                 cqe_ring_prod = ring_prod = 0;
4389                 for (i = 0; i < bp->rx_ring_size; i++) {
4390                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4391                                 BNX2X_ERR("was only able to allocate "
4392                                           "%d rx skbs\n", i);
4393                                 bp->eth_stats.rx_skb_alloc_failed++;
4394                                 break;
4395                         }
4396                         ring_prod = NEXT_RX_IDX(ring_prod);
4397                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4398                         WARN_ON(ring_prod <= i);
4399                 }
4400
4401                 fp->rx_bd_prod = ring_prod;
4402                 /* must not have more available CQEs than BDs */
4403                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4404                                        cqe_ring_prod);
4405                 fp->rx_pkt = fp->rx_calls = 0;
4406
4407                 /* Warning!
4408                  * This will generate an interrupt (to the TSTORM);
4409                  * it must only be done after the chip is initialized.
4410                  */
4411                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4412                                      fp->rx_sge_prod);
4413                 if (j != 0)
4414                         continue;
4415
4416                 REG_WR(bp, BAR_USTRORM_INTMEM +
4417                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4418                        U64_LO(fp->rx_comp_mapping));
4419                 REG_WR(bp, BAR_USTRORM_INTMEM +
4420                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4421                        U64_HI(fp->rx_comp_mapping));
4422         }
4423 }
4424
4425 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4426 {
4427         int i, j;
4428
4429         for_each_queue(bp, j) {
4430                 struct bnx2x_fastpath *fp = &bp->fp[j];
4431
4432                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4433                         struct eth_tx_bd *tx_bd =
4434                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4435
4436                         tx_bd->addr_hi =
4437                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4438                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4439                         tx_bd->addr_lo =
4440                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4441                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4442                 }
4443
4444                 fp->tx_pkt_prod = 0;
4445                 fp->tx_pkt_cons = 0;
4446                 fp->tx_bd_prod = 0;
4447                 fp->tx_bd_cons = 0;
4448                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4449                 fp->tx_pkt = 0;
4450         }
4451 }
4452
4453 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4454 {
4455         int func = BP_FUNC(bp);
4456
4457         spin_lock_init(&bp->spq_lock);
4458
4459         bp->spq_left = MAX_SPQ_PENDING;
4460         bp->spq_prod_idx = 0;
4461         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4462         bp->spq_prod_bd = bp->spq;
4463         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4464
4465         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4466                U64_LO(bp->spq_mapping));
4467         REG_WR(bp,
4468                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4469                U64_HI(bp->spq_mapping));
4470
4471         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4472                bp->spq_prod_idx);
4473 }
4474
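/* Fill the per-connection Ethernet context: XSTORM gets the Tx BD ring and
 * doorbell data addresses, USTORM the Rx BD/SGE rings and buffer sizes,
 * CSTORM the Tx completion index, plus the CDU validation words for this
 * CID.
 */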
4475 static void bnx2x_init_context(struct bnx2x *bp)
4476 {
4477         int i;
4478
4479         for_each_queue(bp, i) {
4480                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4481                 struct bnx2x_fastpath *fp = &bp->fp[i];
4482                 u8 sb_id = FP_SB_ID(fp);
4483
4484                 context->xstorm_st_context.tx_bd_page_base_hi =
4485                                                 U64_HI(fp->tx_desc_mapping);
4486                 context->xstorm_st_context.tx_bd_page_base_lo =
4487                                                 U64_LO(fp->tx_desc_mapping);
4488                 context->xstorm_st_context.db_data_addr_hi =
4489                                                 U64_HI(fp->tx_prods_mapping);
4490                 context->xstorm_st_context.db_data_addr_lo =
4491                                                 U64_LO(fp->tx_prods_mapping);
4492                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4493                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4494
4495                 context->ustorm_st_context.common.sb_index_numbers =
4496                                                 BNX2X_RX_SB_INDEX_NUM;
4497                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498                 context->ustorm_st_context.common.status_block_id = sb_id;
4499                 context->ustorm_st_context.common.flags =
4500                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4501                 context->ustorm_st_context.common.mc_alignment_size =
4502                         BCM_RX_ETH_PAYLOAD_ALIGN;
4503                 context->ustorm_st_context.common.bd_buff_size =
4504                                                 bp->rx_buf_size;
4505                 context->ustorm_st_context.common.bd_page_base_hi =
4506                                                 U64_HI(fp->rx_desc_mapping);
4507                 context->ustorm_st_context.common.bd_page_base_lo =
4508                                                 U64_LO(fp->rx_desc_mapping);
4509                 if (!fp->disable_tpa) {
4510                         context->ustorm_st_context.common.flags |=
4511                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513                         context->ustorm_st_context.common.sge_buff_size =
4514                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4515                         context->ustorm_st_context.common.sge_page_base_hi =
4516                                                 U64_HI(fp->rx_sge_mapping);
4517                         context->ustorm_st_context.common.sge_page_base_lo =
4518                                                 U64_LO(fp->rx_sge_mapping);
4519                 }
4520
4521                 context->cstorm_st_context.sb_index_number =
4522                                                 C_SB_ETH_TX_CQ_INDEX;
4523                 context->cstorm_st_context.status_block_id = sb_id;
4524
4525                 context->xstorm_ag_context.cdu_reserved =
4526                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4527                                                CDU_REGION_NUMBER_XCM_AG,
4528                                                ETH_CONNECTION_TYPE);
4529                 context->ustorm_ag_context.cdu_usage =
4530                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4531                                                CDU_REGION_NUMBER_UCM_AG,
4532                                                ETH_CONNECTION_TYPE);
4533         }
4534 }
4535
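/* RSS indirection: spread the TSTORM indirection-table entries round-robin
 * over the active queues' client ids (multi-queue mode only).
 */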
4536 static void bnx2x_init_ind_table(struct bnx2x *bp)
4537 {
4538         int func = BP_FUNC(bp);
4539         int i;
4540
4541         if (!is_multi(bp))
4542                 return;
4543
4544         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4545         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4546                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4547                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4548                         BP_CL_ID(bp) + (i % bp->num_queues));
4549 }
4550
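/* Per-client TSTORM configuration: MTU, statistics counter id, optional
 * VLAN removal and - when TPA is on - the SGE sizing limits, replicated to
 * every queue's client.
 */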
4551 static void bnx2x_set_client_config(struct bnx2x *bp)
4552 {
4553         struct tstorm_eth_client_config tstorm_client = {0};
4554         int port = BP_PORT(bp);
4555         int i;
4556
4557         tstorm_client.mtu = bp->dev->mtu;
4558         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4559         tstorm_client.config_flags =
4560                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4561 #ifdef BCM_VLAN
4562         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4563                 tstorm_client.config_flags |=
4564                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4565                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4566         }
4567 #endif
4568
4569         if (bp->flags & TPA_ENABLE_FLAG) {
4570                 tstorm_client.max_sges_for_packet =
4571                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4572                 tstorm_client.max_sges_for_packet =
4573                         ((tstorm_client.max_sges_for_packet +
4574                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4575                         PAGES_PER_SGE_SHIFT;
4576
4577                 tstorm_client.config_flags |=
4578                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4579         }
4580
4581         for_each_queue(bp, i) {
4582                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4583                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4584                        ((u32 *)&tstorm_client)[0]);
4585                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4586                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4587                        ((u32 *)&tstorm_client)[1]);
4588         }
4589
4590         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4591            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4592 }
4593
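/* Translate the software rx_mode into the TSTORM per-function MAC filter
 * masks: drop everything for NONE, accept broadcast for NORMAL, accept all
 * multicast + broadcast for ALLMULTI, accept everything for PROMISC.
 */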
4594 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4595 {
4596         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4597         int mode = bp->rx_mode;
4598         int mask = (1 << BP_L_ID(bp));
4599         int func = BP_FUNC(bp);
4600         int i;
4601
4602         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4603
4604         switch (mode) {
4605         case BNX2X_RX_MODE_NONE: /* no Rx */
4606                 tstorm_mac_filter.ucast_drop_all = mask;
4607                 tstorm_mac_filter.mcast_drop_all = mask;
4608                 tstorm_mac_filter.bcast_drop_all = mask;
4609                 break;
4610         case BNX2X_RX_MODE_NORMAL:
4611                 tstorm_mac_filter.bcast_accept_all = mask;
4612                 break;
4613         case BNX2X_RX_MODE_ALLMULTI:
4614                 tstorm_mac_filter.mcast_accept_all = mask;
4615                 tstorm_mac_filter.bcast_accept_all = mask;
4616                 break;
4617         case BNX2X_RX_MODE_PROMISC:
4618                 tstorm_mac_filter.ucast_accept_all = mask;
4619                 tstorm_mac_filter.mcast_accept_all = mask;
4620                 tstorm_mac_filter.bcast_accept_all = mask;
4621                 break;
4622         default:
4623                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4624                 break;
4625         }
4626
4627         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4628                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4629                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4630                        ((u32 *)&tstorm_mac_filter)[i]);
4631
4632 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4633                    ((u32 *)&tstorm_mac_filter)[i]); */
4634         }
4635
4636         if (mode != BNX2X_RX_MODE_NONE)
4637                 bnx2x_set_client_config(bp);
4638 }
4639
4640 static void bnx2x_init_internal_common(struct bnx2x *bp)
4641 {
4642         int i;
4643
4644         if (bp->flags & TPA_ENABLE_FLAG) {
4645                 struct tstorm_eth_tpa_exist tpa = {0};
4646
4647                 tpa.tpa_exist = 1;
4648
4649                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4650                        ((u32 *)&tpa)[0]);
4651                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4652                        ((u32 *)&tpa)[1]);
4653         }
4654
4655         /* Zero this manually as its initialization is
4656            currently missing in the initTool */
4657         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4658                 REG_WR(bp, BAR_USTRORM_INTMEM +
4659                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4660 }
4661
4662 static void bnx2x_init_internal_port(struct bnx2x *bp)
4663 {
4664         int port = BP_PORT(bp);
4665
4666         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4667         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4668         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4669         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4670 }
4671
4672 static void bnx2x_init_internal_func(struct bnx2x *bp)
4673 {
4674         struct tstorm_eth_function_common_config tstorm_config = {0};
4675         struct stats_indication_flags stats_flags = {0};
4676         int port = BP_PORT(bp);
4677         int func = BP_FUNC(bp);
4678         int i;
4679         u16 max_agg_size;
4680
4681         if (is_multi(bp)) {
4682                 tstorm_config.config_flags = MULTI_FLAGS;
4683                 tstorm_config.rss_result_mask = MULTI_MASK;
4684         }
4685
4686         tstorm_config.leading_client_id = BP_L_ID(bp);
4687
4688         REG_WR(bp, BAR_TSTRORM_INTMEM +
4689                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4690                (*(u32 *)&tstorm_config));
4691
4692         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4693         bnx2x_set_storm_rx_mode(bp);
4694
4695         /* reset xstorm per client statistics */
4696         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4697                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4698                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4699                        i*4, 0);
4700         }
4701         /* reset tstorm per client statistics */
4702         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4703                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4704                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4705                        i*4, 0);
4706         }
4707
4708         /* Init statistics related context */
4709         stats_flags.collect_eth = 1;
4710
4711         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4712                ((u32 *)&stats_flags)[0]);
4713         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4714                ((u32 *)&stats_flags)[1]);
4715
4716         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4717                ((u32 *)&stats_flags)[0]);
4718         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4719                ((u32 *)&stats_flags)[1]);
4720
4721         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4722                ((u32 *)&stats_flags)[0]);
4723         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4724                ((u32 *)&stats_flags)[1]);
4725
4726         REG_WR(bp, BAR_XSTRORM_INTMEM +
4727                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4728                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4729         REG_WR(bp, BAR_XSTRORM_INTMEM +
4730                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4731                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4732
4733         REG_WR(bp, BAR_TSTRORM_INTMEM +
4734                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4735                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4736         REG_WR(bp, BAR_TSTRORM_INTMEM +
4737                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4738                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4739
4740         if (CHIP_IS_E1H(bp)) {
4741                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4742                         IS_E1HMF(bp));
4743                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4744                         IS_E1HMF(bp));
4745                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4746                         IS_E1HMF(bp));
4747                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4748                         IS_E1HMF(bp));
4749
4750                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4751                          bp->e1hov);
4752         }
4753
4754         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4755         max_agg_size =
4756                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4757                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4758                     (u32)0xffff);
4759         for_each_queue(bp, i) {
4760                 struct bnx2x_fastpath *fp = &bp->fp[i];
4761
4762                 REG_WR(bp, BAR_USTRORM_INTMEM +
4763                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4764                        U64_LO(fp->rx_comp_mapping));
4765                 REG_WR(bp, BAR_USTRORM_INTMEM +
4766                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4767                        U64_HI(fp->rx_comp_mapping));
4768
4769                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4770                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4771                          max_agg_size);
4772         }
4773 }
4774
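/* The deliberate fall-through below makes a COMMON load also run the PORT
 * and FUNCTION stages, and a PORT load also run FUNCTION, matching the load
 * codes the MCP hands to the first loader on the chip, port and function
 * respectively.
 */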
4775 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4776 {
4777         switch (load_code) {
4778         case FW_MSG_CODE_DRV_LOAD_COMMON:
4779                 bnx2x_init_internal_common(bp);
4780                 /* fall through */
4781
4782         case FW_MSG_CODE_DRV_LOAD_PORT:
4783                 bnx2x_init_internal_port(bp);
4784                 /* fall through */
4785
4786         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4787                 bnx2x_init_internal_func(bp);
4788                 break;
4789
4790         default:
4791                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4792                 break;
4793         }
4794 }
4795
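/* Top-level NIC init: set up every fastpath status block, the default SB,
 * coalescing, the Rx/Tx/slowpath rings, contexts and internal memories, and
 * only then release intr_sem and enable interrupts - note the mb()/mmiowb()
 * pair that flushes all prior writes first.
 */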
4796 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4797 {
4798         int i;
4799
4800         for_each_queue(bp, i) {
4801                 struct bnx2x_fastpath *fp = &bp->fp[i];
4802
4803                 fp->bp = bp;
4804                 fp->state = BNX2X_FP_STATE_CLOSED;
4805                 fp->index = i;
4806                 fp->cl_id = BP_L_ID(bp) + i;
4807                 fp->sb_id = fp->cl_id;
4808                 DP(NETIF_MSG_IFUP,
4809                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4810                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4811                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4812                               FP_SB_ID(fp));
4813                 bnx2x_update_fpsb_idx(fp);
4814         }
4815
4816         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4817                           DEF_SB_ID);
4818         bnx2x_update_dsb_idx(bp);
4819         bnx2x_update_coalesce(bp);
4820         bnx2x_init_rx_rings(bp);
4821         bnx2x_init_tx_ring(bp);
4822         bnx2x_init_sp_ring(bp);
4823         bnx2x_init_context(bp);
4824         bnx2x_init_internal(bp, load_code);
4825         bnx2x_init_ind_table(bp);
4826         bnx2x_stats_init(bp);
4827
4828         /* At this point, we are ready for interrupts */
4829         atomic_set(&bp->intr_sem, 0);
4830
4831         /* flush all before enabling interrupts */
4832         mb();
4833         mmiowb();
4834
4835         bnx2x_int_enable(bp);
4836 }
4837
4838 /* end of nic init */
4839
4840 /*
4841  * gzip service functions
4842  */
4843
4844 static int bnx2x_gunzip_init(struct bnx2x *bp)
4845 {
4846         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4847                                               &bp->gunzip_mapping);
4848         if (bp->gunzip_buf  == NULL)
4849                 goto gunzip_nomem1;
4850
4851         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4852         if (bp->strm  == NULL)
4853                 goto gunzip_nomem2;
4854
4855         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4856                                       GFP_KERNEL);
4857         if (bp->strm->workspace == NULL)
4858                 goto gunzip_nomem3;
4859
4860         return 0;
4861
4862 gunzip_nomem3:
4863         kfree(bp->strm);
4864         bp->strm = NULL;
4865
4866 gunzip_nomem2:
4867         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4868                             bp->gunzip_mapping);
4869         bp->gunzip_buf = NULL;
4870
4871 gunzip_nomem1:
4872         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4873                " decompression\n", bp->dev->name);
4874         return -ENOMEM;
4875 }
4876
4877 static void bnx2x_gunzip_end(struct bnx2x *bp)
4878 {
4879         kfree(bp->strm->workspace);
4880
4881         kfree(bp->strm);
4882         bp->strm = NULL;
4883
4884         if (bp->gunzip_buf) {
4885                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4886                                     bp->gunzip_mapping);
4887                 bp->gunzip_buf = NULL;
4888         }
4889 }
4890
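/* Inflate a gzipped firmware image into bp->gunzip_buf.  The 10-byte gzip
 * header (plus the optional NUL-terminated file-name field flagged by
 * FNAME) is skipped by hand, and zlib is initialized with -MAX_WBITS so it
 * decodes the raw deflate stream without expecting a zlib/gzip wrapper.
 */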
4891 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4892 {
4893         int n, rc;
4894
4895         /* check gzip header */
4896         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4897                 return -EINVAL;
4898
4899         n = 10;
4900
4901 #define FNAME                           0x8
4902
4903         if (zbuf[3] & FNAME)
4904                 while ((zbuf[n++] != 0) && (n < len));
4905
4906         bp->strm->next_in = zbuf + n;
4907         bp->strm->avail_in = len - n;
4908         bp->strm->next_out = bp->gunzip_buf;
4909         bp->strm->avail_out = FW_BUF_SIZE;
4910
4911         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4912         if (rc != Z_OK)
4913                 return rc;
4914
4915         rc = zlib_inflate(bp->strm, Z_FINISH);
4916         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4917                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4918                        bp->dev->name, bp->strm->msg);
4919
4920         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4921         if (bp->gunzip_outlen & 0x3)
4922                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4923                                     " gunzip_outlen (%d) not aligned\n",
4924                        bp->dev->name, bp->gunzip_outlen);
4925         bp->gunzip_outlen >>= 2;
4926
4927         zlib_inflateEnd(bp->strm);
4928
4929         if (rc == Z_STREAM_END)
4930                 return 0;
4931
4932         return rc;
4933 }
4934
4935 /* nic load/unload */
4936
4937 /*
4938  * General service functions
4939  */
4940
4941 /* send a NIG loopback debug packet */
4942 static void bnx2x_lb_pckt(struct bnx2x *bp)
4943 {
4944         u32 wb_write[3];
4945
4946         /* Ethernet source and destination addresses */
4947         wb_write[0] = 0x55555555;
4948         wb_write[1] = 0x55555555;
4949         wb_write[2] = 0x20;             /* SOP */
4950         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4951
4952         /* NON-IP protocol */
4953         wb_write[0] = 0x09000000;
4954         wb_write[1] = 0x55555555;
4955         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4956         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4957 }
4958
4959 /* Some of the internal memories are not directly readable from the
4960  * driver; to test them we send debug packets through the NIG loopback
4961  * and verify the packet counts the NIG and PRS blocks report.
4962  */
4963 static int bnx2x_int_mem_test(struct bnx2x *bp)
4964 {
4965         int factor;
4966         int count, i;
4967         u32 val = 0;
4968
4969         if (CHIP_REV_IS_FPGA(bp))
4970                 factor = 120;
4971         else if (CHIP_REV_IS_EMUL(bp))
4972                 factor = 200;
4973         else
4974                 factor = 1;
4975
4976         DP(NETIF_MSG_HW, "start part1\n");
4977
4978         /* Disable inputs of parser neighbor blocks */
4979         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4980         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4981         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4982         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4983
4984         /*  Write 0 to parser credits for CFC search request */
4985         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986
4987         /* send Ethernet packet */
4988         bnx2x_lb_pckt(bp);
4989
4990         /* TODO: should the NIG statistics be reset here? */
4991         /* Wait until NIG register shows 1 packet of size 0x10 */
4992         count = 1000 * factor;
4993         while (count) {
4994
4995                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996                 val = *bnx2x_sp(bp, wb_data[0]);
4997                 if (val == 0x10)
4998                         break;
4999
5000                 msleep(10);
5001                 count--;
5002         }
5003         if (val != 0x10) {
5004                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5005                 return -1;
5006         }
5007
5008         /* Wait until PRS register shows 1 packet */
5009         count = 1000 * factor;
5010         while (count) {
5011                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5012                 if (val == 1)
5013                         break;
5014
5015                 msleep(10);
5016                 count--;
5017         }
5018         if (val != 0x1) {
5019                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5020                 return -2;
5021         }
5022
5023         /* Reset and init BRB, PRS */
5024         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5025         msleep(50);
5026         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5027         msleep(50);
5028         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5029         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5030
5031         DP(NETIF_MSG_HW, "part2\n");
5032
5033         /* Disable inputs of parser neighbor blocks */
5034         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5035         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5036         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5037         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5038
5039         /* Write 0 to parser credits for CFC search request */
5040         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5041
5042         /* send 10 Ethernet packets */
5043         for (i = 0; i < 10; i++)
5044                 bnx2x_lb_pckt(bp);
5045
5046         /* Wait until NIG register shows 10 + 1
5047            packets of size 11*0x10 = 0xb0 */
5048         count = 1000 * factor;
5049         while (count) {
5050
5051                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5052                 val = *bnx2x_sp(bp, wb_data[0]);
5053                 if (val == 0xb0)
5054                         break;
5055
5056                 msleep(10);
5057                 count--;
5058         }
5059         if (val != 0xb0) {
5060                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5061                 return -3;
5062         }
5063
5064         /* Wait until PRS register shows 2 packets */
5065         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5066         if (val != 2)
5067                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5068
5069         /* Write 1 to parser credits for CFC search request */
5070         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5071
5072         /* Wait until PRS register shows 3 packets */
5073         msleep(10 * factor);
5074         /* the PRS should now have parsed the 3rd packet as well */
5075         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5076         if (val != 3)
5077                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5078
5079         /* clear NIG EOP FIFO */
5080         for (i = 0; i < 11; i++)
5081                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5082         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5083         if (val != 1) {
5084                 BNX2X_ERR("clear of NIG failed\n");
5085                 return -4;
5086         }
5087
5088         /* Reset and init BRB, PRS, NIG */
5089         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5090         msleep(50);
5091         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5092         msleep(50);
5093         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5094         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5095 #ifndef BCM_ISCSI
5096         /* set NIC mode */
5097         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5098 #endif
5099
5100         /* Enable inputs of parser neighbor blocks */
5101         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5102         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5103         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5104         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5105
5106         DP(NETIF_MSG_HW, "done\n");
5107
5108         return 0; /* OK */
5109 }
5110
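/* Unmask the attention interrupts of the HW blocks; writing 0 to a mask
 * register unmasks all of that block's sources, while the non-zero PXP2 and
 * PBF values (and the commented-out SEM/MISC writes) leave a few bits
 * masked on purpose.
 */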
5111 static void enable_blocks_attention(struct bnx2x *bp)
5112 {
5113         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5115         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5116         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5118         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5119         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5120         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5121         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5122 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5124         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5125         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5126         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5127 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5129         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5130         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5131         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5132         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5133 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135         if (CHIP_REV_IS_FPGA(bp))
5136                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5137         else
5138                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5139         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5140         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5141         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5142 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5144         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5145         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5146 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5148 }
5149
5150
5151 static void bnx2x_reset_common(struct bnx2x *bp)
5152 {
5153         /* reset_common */
5154         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5155                0xd3ffff7f);
5156         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5157 }
5158
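/* COMMON stage of the HW bring-up, run once per chip by whichever function
 * loads first: take all blocks out of reset, initialize them in dependency
 * order with bnx2x_init_block(), zero the storm internal memories and poll
 * the PXP/CFC "init done" registers before declaring success.
 */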
5159 static int bnx2x_init_common(struct bnx2x *bp)
5160 {
5161         u32 val, i;
5162
5163         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5164
5165         bnx2x_reset_common(bp);
5166         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5167         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5168
5169         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5170         if (CHIP_IS_E1H(bp))
5171                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5172
5173         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5174         msleep(30);
5175         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5176
5177         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5178         if (CHIP_IS_E1(bp)) {
5179                 /* enable HW interrupt from PXP on USDM overflow
5180                    (bit 16 of INT_MASK_0) */
5181                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5182         }
5183
5184         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5185         bnx2x_init_pxp(bp);
5186
5187 #ifdef __BIG_ENDIAN
5188         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5189         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5190         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5191         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5192         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5193
5194 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5195         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5196         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5197         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5198         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5199 #endif
5200
5201         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5202 #ifdef BCM_ISCSI
5203         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5204         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5205         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5206 #endif
5207
5208         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5209                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5210
5211         /* let the HW do its magic ... */
5212         msleep(100);
5213         /* finish PXP init */
5214         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5215         if (val != 1) {
5216                 BNX2X_ERR("PXP2 CFG failed\n");
5217                 return -EBUSY;
5218         }
5219         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5220         if (val != 1) {
5221                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5222                 return -EBUSY;
5223         }
5224
5225         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5226         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5227
5228         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5229
5230         /* clean the DMAE memory */
5231         bp->dmae_ready = 1;
5232         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5233
5234         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5235         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5236         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5237         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5238
5239         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5240         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5241         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5242         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5243
5244         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5245         /* soft reset pulse */
5246         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5247         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5248
5249 #ifdef BCM_ISCSI
5250         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5251 #endif
5252
5253         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5254         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5255         if (!CHIP_REV_IS_SLOW(bp)) {
5256                 /* enable hw interrupt from doorbell Q */
5257                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5258         }
5259
5260         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5261         if (CHIP_REV_IS_SLOW(bp)) {
5262                 /* fix for emulation and FPGA: no pause flow control */
5263                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5264                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5265                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5266                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5267         }
5268
5269         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5270         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5271         /* set NIC mode */
5272         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5273         if (CHIP_IS_E1H(bp))
5274                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5275
5276         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5277         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5278         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5279         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5280
5281         if (CHIP_IS_E1H(bp)) {
5282                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5283                                 STORM_INTMEM_SIZE_E1H/2);
5284                 bnx2x_init_fill(bp,
5285                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5286                                 0, STORM_INTMEM_SIZE_E1H/2);
5287                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5288                                 STORM_INTMEM_SIZE_E1H/2);
5289                 bnx2x_init_fill(bp,
5290                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5291                                 0, STORM_INTMEM_SIZE_E1H/2);
5292                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5293                                 STORM_INTMEM_SIZE_E1H/2);
5294                 bnx2x_init_fill(bp,
5295                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5296                                 0, STORM_INTMEM_SIZE_E1H/2);
5297                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5298                                 STORM_INTMEM_SIZE_E1H/2);
5299                 bnx2x_init_fill(bp,
5300                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5301                                 0, STORM_INTMEM_SIZE_E1H/2);
5302         } else { /* E1 */
5303                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5304                                 STORM_INTMEM_SIZE_E1);
5305                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306                                 STORM_INTMEM_SIZE_E1);
5307                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5308                                 STORM_INTMEM_SIZE_E1);
5309                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5310                                 STORM_INTMEM_SIZE_E1);
5311         }
5312
5313         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5314         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5315         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5316         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5317
5318         /* sync semi rtc */
5319         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5320                0x80000000);
5321         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5322                0x80000000);
5323
5324         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5325         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5326         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5327
5328         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5329         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5330                 REG_WR(bp, i, 0xc0cac01a);
5331                 /* TODO: replace with something meaningful */
5332         }
5333         if (CHIP_IS_E1H(bp))
5334                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5335         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5336
5337         if (sizeof(union cdu_context) != 1024)
5338                 /* we currently assume that a context is 1024 bytes */
5339                 printk(KERN_ALERT PFX "please adjust the size of"
5340                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5341
5342         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5343         val = (4 << 24) + (0 << 12) + 1024;
5344         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5345         if (CHIP_IS_E1(bp)) {
5346                 /* !!! fix pxp client credit until excel update */
5347                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5348                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5349         }
5350
5351         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5352         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5353
5354         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5355         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5356
5357         /* PXPCS COMMON comes here */
5358         /* Reset PCIE errors for debug */
5359         REG_WR(bp, 0x2814, 0xffffffff);
5360         REG_WR(bp, 0x3820, 0xffffffff);
5361
5362         /* EMAC0 COMMON comes here */
5363         /* EMAC1 COMMON comes here */
5364         /* DBU COMMON comes here */
5365         /* DBG COMMON comes here */
5366
5367         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5368         if (CHIP_IS_E1H(bp)) {
5369                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5370                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5371         }
5372
5373         if (CHIP_REV_IS_SLOW(bp))
5374                 msleep(200);
5375
5376         /* finish CFC init */
5377         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5378         if (val != 1) {
5379                 BNX2X_ERR("CFC LL_INIT failed\n");
5380                 return -EBUSY;
5381         }
5382         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5383         if (val != 1) {
5384                 BNX2X_ERR("CFC AC_INIT failed\n");
5385                 return -EBUSY;
5386         }
5387         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5388         if (val != 1) {
5389                 BNX2X_ERR("CFC CAM_INIT failed\n");
5390                 return -EBUSY;
5391         }
5392         REG_WR(bp, CFC_REG_DEBUG0, 0);
5393
5394         /* read NIG statistic
5395            to see if this is our first time up since powerup */
5396         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5397         val = *bnx2x_sp(bp, wb_data[0]);
5398
5399         /* do internal memory self test */
5400         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5401                 BNX2X_ERR("internal mem self test failed\n");
5402                 return -EBUSY;
5403         }
5404
5405         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5406         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5407         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5408                 /* Fan failure is indicated by SPIO 5 */
5409                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5410                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5411
5412                 /* set to active low mode */
5413                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5414                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5415                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5416                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5417
5418                 /* enable interrupt to signal the IGU */
5419                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5420                 val |= (1 << MISC_REGISTERS_SPIO_5);
5421                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5422                 break;
5423
5424         default:
5425                 break;
5426         }
5427
5428         /* clear PXP2 attentions */
5429         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5430
5431         enable_blocks_attention(bp);
5432
5433         if (!BP_NOMCP(bp)) {
5434                 bnx2x_acquire_phy_lock(bp);
5435                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5436                 bnx2x_release_phy_lock(bp);
5437         } else
5438                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5439
5440         return 0;
5441 }
5442
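/* Per-port HW init: runs once for each of the two ports, after the common
 * init above has completed.  Several blocks are still placeholders
 * ("... comes here") in this driver version and are initialized elsewhere
 * or not at all.
 */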
5443 static int bnx2x_init_port(struct bnx2x *bp)
5444 {
5445         int port = BP_PORT(bp);
#ifdef BCM_ISCSI
        /* used only by the BCM_ISCSI blocks below; the start value follows
         * their "Port0  1 / Port1  385" ILT-line comments (384 ILT lines
         * per function, the first line used is i+1 after the i++ below) */
        int func = BP_FUNC(bp);
        int i = 384 * func;
        u32 wb_write[2];
#endif
5446         u32 val;
5447
5448         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5449
5450         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5451
5452         /* Port PXP comes here */
5453         /* Port PXP2 comes here */
5454 #ifdef BCM_ISCSI
5455         /* Port0  1
5456          * Port1  385 */
5457         i++;
5458         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5459         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5460         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5461         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5462
5463         /* Port0  2
5464          * Port1  386 */
5465         i++;
5466         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5467         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5468         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5470
5471         /* Port0  3
5472          * Port1  387 */
5473         i++;
5474         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5475         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5476         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5478 #endif
5479         /* Port CMs come here */
5480
5481         /* Port QM comes here */
5482 #ifdef BCM_ISCSI
5483         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5484         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5485
5486         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5487                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5488 #endif
5489         /* Port DQ comes here */
5490         /* Port BRB1 comes here */
5491         /* Port PRS comes here */
5492         /* Port TSDM comes here */
5493         /* Port CSDM comes here */
5494         /* Port USDM comes here */
5495         /* Port XSDM comes here */
5496         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5497                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5498         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5499                              port ? USEM_PORT1_END : USEM_PORT0_END);
5500         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5501                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5502         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5503                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5504         /* Port UPB comes here */
5505         /* Port XPB comes here */
5506
5507         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5508                              port ? PBF_PORT1_END : PBF_PORT0_END);
5509
5510         /* configure PBF to work without PAUSE mtu 9000 */
5511         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5512
5513         /* update threshold */
5514         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5515         /* update init credit */
5516         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5517
5518         /* probe changes */
5519         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5520         msleep(5);
5521         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5522
5523 #ifdef BCM_ISCSI
5524         /* tell the searcher where the T2 table is */
5525         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5526
5527         wb_write[0] = U64_LO(bp->t2_mapping);
5528         wb_write[1] = U64_HI(bp->t2_mapping);
5529         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5530         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5531         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5532         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5533
5534         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5535         /* Port SRCH comes here */
5536 #endif
5537         /* Port CDU comes here */
5538         /* Port CFC comes here */
5539
5540         if (CHIP_IS_E1(bp)) {
5541                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5542                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5543         }
5544         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5545                              port ? HC_PORT1_END : HC_PORT0_END);
5546
5547         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5548                                     MISC_AEU_PORT0_START,
5549                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5550         /* init aeu_mask_attn_func_0/1:
5551          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5552          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5553          *             bits 4-7 are used for "per vn group attention" */
5554         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5555                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5556
5557         /* Port PXPCS comes here */
5558         /* Port EMAC0 comes here */
5559         /* Port EMAC1 comes here */
5560         /* Port DBU comes here */
5561         /* Port DBG comes here */
5562         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5563                              port ? NIG_PORT1_END : NIG_PORT0_END);
5564
5565         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5566
5567         if (CHIP_IS_E1H(bp)) {
5568                 u32 wsum;
5569                 struct cmng_struct_per_port m_cmng_port;
5570                 int vn;
5571
5572                 /* 0x2 disable e1hov, 0x1 enable */
5573                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5574                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5575
5576                 /* Init RATE SHAPING and FAIRNESS contexts.
5577                    Initialize as if there is 10G link. */
5578                 wsum = bnx2x_calc_vn_wsum(bp);
5579                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5580                 if (IS_E1HMF(bp))
5581                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5582                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5583                                         wsum, 10000, &m_cmng_port);
5584         }
5585
5586         /* Port MCP comes here */
5587         /* Port DMAE comes here */
5588
5589         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5590         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5591         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5592                 /* add SPIO 5 to group 0 */
5593                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5594                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5595                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5596                 break;
5597
5598         default:
5599                 break;
5600         }
5601
5602         bnx2x__link_reset(bp);
5603
5604         return 0;
5605 }
5606
5607 #define ILT_PER_FUNC            (768/2)
5608 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5609 /* the phys address is shifted right 12 bits and a valid bit (1) is
5610    added at bit 52 (the 53rd bit);
5611    since this is a wide register(TM)
5612    we split it into two 32-bit writes
5613  */
5614 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5615 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5616 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5617 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
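/* Illustrative example (values are hypothetical): for a DMA address of
 * 0x0000000123456000, ONCHIP_ADDR1() yields 0x00123456 (address >> 12) and
 * ONCHIP_ADDR2() yields 0x00100000 (only the valid bit, since address bits
 * 63:44 are zero here).  PXP_ONE_ILT(5) yields 0x1405, i.e. a range whose
 * first and last line are both 5.
 */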
5618
5619 #define CNIC_ILT_LINES          0
5620
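/* Write one ILT (internal lookup table) line: the 64-bit on-chip address
 * is split into the two 32-bit halves defined above.  E1H uses a different
 * register block (..._AT_B0) than E1.
 */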
5621 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5622 {
5623         int reg;
5624
5625         if (CHIP_IS_E1H(bp))
5626                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5627         else /* E1 */
5628                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5629
5630         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5631 }
5632
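/* Per-function HW init: maps this function's ILT lines and programs the
 * per-function HC and NIG registers.  On E1H each function also gets its
 * outer VLAN (e1hov) programmed into the LLH.
 */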
5633 static int bnx2x_init_func(struct bnx2x *bp)
5634 {
5635         int port = BP_PORT(bp);
5636         int func = BP_FUNC(bp);
5637         int i;
5638
5639         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5640
5641         i = FUNC_ILT_BASE(func);
5642
5643         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5644         if (CHIP_IS_E1H(bp)) {
5645                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5646                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5647         } else /* E1 */
5648                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5649                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5650
5651
5652         if (CHIP_IS_E1H(bp)) {
5653                 for (i = 0; i < 9; i++)
5654                         bnx2x_init_block(bp,
5655                                          cm_start[func][i], cm_end[func][i]);
5656
5657                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5658                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5659         }
5660
5661         /* HC init per function */
5662         if (CHIP_IS_E1H(bp)) {
5663                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5664
5665                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5666                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5667         }
5668         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5669
5670         if (CHIP_IS_E1H(bp))
5671                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5672
5673         /* Reset PCIE errors for debug */
5674         REG_WR(bp, 0x2114, 0xffffffff);
5675         REG_WR(bp, 0x2120, 0xffffffff);
5676
5677         return 0;
5678 }
5679
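/* Top-level HW init dispatcher.  The switch below deliberately falls
 * through (see the "no break" comments): a COMMON load also performs the
 * PORT and FUNCTION stages, and a PORT load also performs the FUNCTION
 * stage.
 */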
5680 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5681 {
5682         int i, rc = 0;
5683
5684         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5685            BP_FUNC(bp), load_code);
5686
5687         bp->dmae_ready = 0;
5688         mutex_init(&bp->dmae_mutex);
5689         bnx2x_gunzip_init(bp);
5690
5691         switch (load_code) {
5692         case FW_MSG_CODE_DRV_LOAD_COMMON:
5693                 rc = bnx2x_init_common(bp);
5694                 if (rc)
5695                         goto init_hw_err;
5696                 /* no break */
5697
5698         case FW_MSG_CODE_DRV_LOAD_PORT:
5699                 bp->dmae_ready = 1;
5700                 rc = bnx2x_init_port(bp);
5701                 if (rc)
5702                         goto init_hw_err;
5703                 /* no break */
5704
5705         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5706                 bp->dmae_ready = 1;
5707                 rc = bnx2x_init_func(bp);
5708                 if (rc)
5709                         goto init_hw_err;
5710                 break;
5711
5712         default:
5713                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5714                 break;
5715         }
5716
5717         if (!BP_NOMCP(bp)) {
5718                 int func = BP_FUNC(bp);
5719
5720                 bp->fw_drv_pulse_wr_seq =
5721                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5722                                  DRV_PULSE_SEQ_MASK);
5723                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5724                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5725                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5726         } else
5727                 bp->func_stx = 0;
5728
5729         /* this needs to be done before gunzip end */
5730         bnx2x_zero_def_sb(bp);
5731         for_each_queue(bp, i)
5732                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5733
5734 init_hw_err:
5735         bnx2x_gunzip_end(bp);
5736
5737         return rc;
5738 }
5739
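/* Driver/MCP mailbox handshake: a sequence number is ORed into the command
 * so the reply can be matched; the poll below gives the firmware up to
 * 200 iterations of 10ms each (100ms on emulation/FPGA) to echo it back.
 */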
5740 /* send the MCP a request, block until there is a reply */
5741 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5742 {
5743         int func = BP_FUNC(bp);
5744         u32 seq = ++bp->fw_seq;
5745         u32 rc = 0;
5746         u32 cnt = 1;
5747         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5748
5749         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5750         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5751
5752         do {
5753                 /* let the FW do its magic ... */
5754                 msleep(delay);
5755
5756                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5757
5758                 /* Give the FW up to 2 seconds (200*10ms) */
5759         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5760
5761         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5762            cnt*delay, rc, seq);
5763
5764         /* is this a reply to our command? */
5765         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5766                 rc &= FW_MSG_CODE_MASK;
5767
5768         } else {
5769                 /* FW BUG! */
5770                 BNX2X_ERR("FW failed to respond!\n");
5771                 bnx2x_fw_dump(bp);
5772                 rc = 0;
5773         }
5774
5775         return rc;
5776 }
5777
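/* Free all driver memory: per-queue status blocks and rings (DMA-coherent
 * memory via pci_free_consistent, driver-only arrays via vfree), then the
 * default status block, slowpath buffer and SPQ.  Mirrors
 * bnx2x_alloc_mem() below.
 */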
5778 static void bnx2x_free_mem(struct bnx2x *bp)
5779 {
5780
5781 #define BNX2X_PCI_FREE(x, y, size) \
5782         do { \
5783                 if (x) { \
5784                         pci_free_consistent(bp->pdev, size, x, y); \
5785                         x = NULL; \
5786                         y = 0; \
5787                 } \
5788         } while (0)
5789
5790 #define BNX2X_FREE(x) \
5791         do { \
5792                 if (x) { \
5793                         vfree(x); \
5794                         x = NULL; \
5795                 } \
5796         } while (0)
5797
5798         int i;
5799
5800         /* fastpath */
5801         for_each_queue(bp, i) {
5802
5803                 /* Status blocks */
5804                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5805                                bnx2x_fp(bp, i, status_blk_mapping),
5806                                sizeof(struct host_status_block) +
5807                                sizeof(struct eth_tx_db_data));
5808
5809                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5810                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812                                bnx2x_fp(bp, i, tx_desc_mapping),
5813                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5814
5815                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5816                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5817                                bnx2x_fp(bp, i, rx_desc_mapping),
5818                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5819
5820                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5821                                bnx2x_fp(bp, i, rx_comp_mapping),
5822                                sizeof(struct eth_fast_path_rx_cqe) *
5823                                NUM_RCQ_BD);
5824
5825                 /* SGE ring */
5826                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5827                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5828                                bnx2x_fp(bp, i, rx_sge_mapping),
5829                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5830         }
5831         /* end of fastpath */
5832
5833         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5834                        sizeof(struct host_def_status_block));
5835
5836         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5837                        sizeof(struct bnx2x_slowpath));
5838
5839 #ifdef BCM_ISCSI
5840         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5841         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5842         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5843         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5844 #endif
5845         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5846
5847 #undef BNX2X_PCI_FREE
5848 #undef BNX2X_FREE
5849 }
5850
5851 static int bnx2x_alloc_mem(struct bnx2x *bp)
5852 {
5853
5854 #define BNX2X_PCI_ALLOC(x, y, size) \
5855         do { \
5856                 x = pci_alloc_consistent(bp->pdev, size, y); \
5857                 if (x == NULL) \
5858                         goto alloc_mem_err; \
5859                 memset(x, 0, size); \
5860         } while (0)
5861
5862 #define BNX2X_ALLOC(x, size) \
5863         do { \
5864                 x = vmalloc(size); \
5865                 if (x == NULL) \
5866                         goto alloc_mem_err; \
5867                 memset(x, 0, size); \
5868         } while (0)
5869
5870         int i;
5871
5872         /* fastpath */
5873         for_each_queue(bp, i) {
5874                 bnx2x_fp(bp, i, bp) = bp;
5875
5876                 /* Status blocks */
5877                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5878                                 &bnx2x_fp(bp, i, status_blk_mapping),
5879                                 sizeof(struct host_status_block) +
5880                                 sizeof(struct eth_tx_db_data));
5881
5882                 bnx2x_fp(bp, i, hw_tx_prods) =
5883                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5884
5885                 bnx2x_fp(bp, i, tx_prods_mapping) =
5886                                 bnx2x_fp(bp, i, status_blk_mapping) +
5887                                 sizeof(struct host_status_block);
5888
5889                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5890                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5891                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5892                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5893                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5894                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5895
5896                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5897                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5898                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5899                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5900                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5901
5902                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5903                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5904                                 sizeof(struct eth_fast_path_rx_cqe) *
5905                                 NUM_RCQ_BD);
5906
5907                 /* SGE ring */
5908                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5909                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5910                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5911                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5912                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5913         }
5914         /* end of fastpath */
5915
5916         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5917                         sizeof(struct host_def_status_block));
5918
5919         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5920                         sizeof(struct bnx2x_slowpath));
5921
5922 #ifdef BCM_ISCSI
5923         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5924
5925         /* Initialize T1 */
5926         for (i = 0; i < 64*1024; i += 64) {
5927                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5928                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5929         }
5930
5931         /* allocate searcher T2 table
5932            we allocate 1/4 of alloc num for T2
5933            (which is not entered into the ILT) */
5934         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5935
5936         /* Initialize T2 */
5937         for (i = 0; i < 16*1024; i += 64)
5938                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5939
5940         /* now fixup the last line in the block to point to the next block */
5941         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5942
5943         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5944         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5945
5946         /* QM queues (128*MAX_CONN) */
5947         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5948 #endif
5949
5950         /* Slow path ring */
5951         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5952
5953         return 0;
5954
5955 alloc_mem_err:
5956         bnx2x_free_mem(bp);
5957         return -ENOMEM;
5958
5959 #undef BNX2X_PCI_ALLOC
5960 #undef BNX2X_ALLOC
5961 }
5962
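/* On shutdown, walk each Tx ring from the packet consumer to the producer
 * and release any SKBs the hardware never completed; Rx buffers and the
 * TPA pool are released by the Rx counterpart below.
 */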
5963 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5964 {
5965         int i;
5966
5967         for_each_queue(bp, i) {
5968                 struct bnx2x_fastpath *fp = &bp->fp[i];
5969
5970                 u16 bd_cons = fp->tx_bd_cons;
5971                 u16 sw_prod = fp->tx_pkt_prod;
5972                 u16 sw_cons = fp->tx_pkt_cons;
5973
5974                 while (sw_cons != sw_prod) {
5975                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5976                         sw_cons++;
5977                 }
5978         }
5979 }
5980
5981 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5982 {
5983         int i, j;
5984
5985         for_each_queue(bp, j) {
5986                 struct bnx2x_fastpath *fp = &bp->fp[j];
5987
5988                 for (i = 0; i < NUM_RX_BD; i++) {
5989                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5990                         struct sk_buff *skb = rx_buf->skb;
5991
5992                         if (skb == NULL)
5993                                 continue;
5994
5995                         pci_unmap_single(bp->pdev,
5996                                          pci_unmap_addr(rx_buf, mapping),
5997                                          bp->rx_buf_size,
5998                                          PCI_DMA_FROMDEVICE);
5999
6000                         rx_buf->skb = NULL;
6001                         dev_kfree_skb(skb);
6002                 }
6003                 if (!fp->disable_tpa)
6004                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6005                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6006                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6007         }
6008 }
6009
6010 static void bnx2x_free_skbs(struct bnx2x *bp)
6011 {
6012         bnx2x_free_tx_skbs(bp);
6013         bnx2x_free_rx_skbs(bp);
6014 }
6015
6016 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6017 {
6018         int i, offset = 1;
6019
6020         free_irq(bp->msix_table[0].vector, bp->dev);
6021         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6022            bp->msix_table[0].vector);
6023
6024         for_each_queue(bp, i) {
6025                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6026                    "state %x\n", i, bp->msix_table[i + offset].vector,
6027                    bnx2x_fp(bp, i, state));
6028
6029                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6030                         BNX2X_ERR("IRQ of fp #%d being freed while "
6031                                   "state != closed\n", i);
6032
6033                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6034         }
6035 }
6036
6037 static void bnx2x_free_irq(struct bnx2x *bp)
6038 {
6039         if (bp->flags & USING_MSIX_FLAG) {
6040                 bnx2x_free_msix_irqs(bp);
6041                 pci_disable_msix(bp->pdev);
6042                 bp->flags &= ~USING_MSIX_FLAG;
6043
6044         } else
6045                 free_irq(bp->pdev->irq, bp->dev);
6046 }
6047
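/* MSI-X vector layout: entry 0 is the slowpath (default status block),
 * entries 1..num_queues are the fastpath queues, whose IGU vectors are
 * offset by this function's IGU base ID.
 */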
6048 static int bnx2x_enable_msix(struct bnx2x *bp)
6049 {
6050         int i, rc, offset;
6051
6052         bp->msix_table[0].entry = 0;
6053         offset = 1;
6054         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6055
6056         for_each_queue(bp, i) {
6057                 int igu_vec = offset + i + BP_L_ID(bp);
6058
6059                 bp->msix_table[i + offset].entry = igu_vec;
6060                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6061                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6062         }
6063
6064         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6065                              bp->num_queues + offset);
6066         if (rc) {
6067                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6068                 return -1;
6069         }
6070         bp->flags |= USING_MSIX_FLAG;
6071
6072         return 0;
6073 }
6074
6075 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6076 {
6077         int i, rc, offset = 1;
6078
6079         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6080                          bp->dev->name, bp->dev);
6081         if (rc) {
6082                 BNX2X_ERR("request sp irq failed\n");
6083                 return -EBUSY;
6084         }
6085
6086         for_each_queue(bp, i) {
6087                 rc = request_irq(bp->msix_table[i + offset].vector,
6088                                  bnx2x_msix_fp_int, 0,
6089                                  bp->dev->name, &bp->fp[i]);
6090                 if (rc) {
6091                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6092                                   i + offset, -rc);
6093                         bnx2x_free_msix_irqs(bp);
6094                         return -EBUSY;
6095                 }
6096
6097                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6098         }
6099
6100         return 0;
6101 }
6102
6103 static int bnx2x_req_irq(struct bnx2x *bp)
6104 {
6105         int rc;
6106
6107         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6108                          bp->dev->name, bp->dev);
6109         if (!rc)
6110                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6111
6112         return rc;
6113 }
6114
6115 static void bnx2x_napi_enable(struct bnx2x *bp)
6116 {
6117         int i;
6118
6119         for_each_queue(bp, i)
6120                 napi_enable(&bnx2x_fp(bp, i, napi));
6121 }
6122
6123 static void bnx2x_napi_disable(struct bnx2x *bp)
6124 {
6125         int i;
6126
6127         for_each_queue(bp, i)
6128                 napi_disable(&bnx2x_fp(bp, i, napi));
6129 }
6130
6131 static void bnx2x_netif_start(struct bnx2x *bp)
6132 {
6133         if (atomic_dec_and_test(&bp->intr_sem)) {
6134                 if (netif_running(bp->dev)) {
6135                         if (bp->state == BNX2X_STATE_OPEN)
6136                                 netif_wake_queue(bp->dev);
6137                         bnx2x_napi_enable(bp);
6138                         bnx2x_int_enable(bp);
6139                 }
6140         }
6141 }
6142
6143 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6144 {
6145         bnx2x_int_disable_sync(bp, disable_hw);
6146         bnx2x_napi_disable(bp);
6147         if (netif_running(bp->dev)) {
6148                 netif_tx_disable(bp->dev);
6149                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6150         }
6151 }
6152
6153 /*
6154  * Init service functions
6155  */
6156
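/* Program the E1 CAM via a SET_MAC ramrod: config table entry 0 carries
 * the primary MAC, entry 1 the broadcast address.  When 'set' is 0 the
 * same entries are invalidated instead.
 */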
6157 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6158 {
6159         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6160         int port = BP_PORT(bp);
6161
6162         /* CAM allocation
6163          * unicasts 0-31:port0 32-63:port1
6164          * multicast 64-127:port0 128-191:port1
6165          */
6166         config->hdr.length_6b = 2;
6167         config->hdr.offset = port ? 32 : 0;
6168         config->hdr.client_id = BP_CL_ID(bp);
6169         config->hdr.reserved1 = 0;
6170
6171         /* primary MAC */
6172         config->config_table[0].cam_entry.msb_mac_addr =
6173                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6174         config->config_table[0].cam_entry.middle_mac_addr =
6175                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6176         config->config_table[0].cam_entry.lsb_mac_addr =
6177                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6178         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6179         if (set)
6180                 config->config_table[0].target_table_entry.flags = 0;
6181         else
6182                 CAM_INVALIDATE(config->config_table[0]);
6183         config->config_table[0].target_table_entry.client_id = 0;
6184         config->config_table[0].target_table_entry.vlan_id = 0;
6185
6186         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6187            (set ? "setting" : "clearing"),
6188            config->config_table[0].cam_entry.msb_mac_addr,
6189            config->config_table[0].cam_entry.middle_mac_addr,
6190            config->config_table[0].cam_entry.lsb_mac_addr);
6191
6192         /* broadcast */
6193         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6194         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6195         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6196         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6197         if (set)
6198                 config->config_table[1].target_table_entry.flags =
6199                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6200         else
6201                 CAM_INVALIDATE(config->config_table[1]);
6202         config->config_table[1].target_table_entry.client_id = 0;
6203         config->config_table[1].target_table_entry.vlan_id = 0;
6204
6205         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6206                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6207                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6208 }
6209
6210 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6211 {
6212         struct mac_configuration_cmd_e1h *config =
6213                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6214
6215         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6216                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6217                 return;
6218         }
6219
6220         /* CAM allocation for E1H
6221          * unicasts: by func number
6222          * multicast: 20+FUNC*20, 20 each
6223          */
6224         config->hdr.length_6b = 1;
6225         config->hdr.offset = BP_FUNC(bp);
6226         config->hdr.client_id = BP_CL_ID(bp);
6227         config->hdr.reserved1 = 0;
6228
6229         /* primary MAC */
6230         config->config_table[0].msb_mac_addr =
6231                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6232         config->config_table[0].middle_mac_addr =
6233                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6234         config->config_table[0].lsb_mac_addr =
6235                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6236         config->config_table[0].client_id = BP_L_ID(bp);
6237         config->config_table[0].vlan_id = 0;
6238         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6239         if (set)
6240                 config->config_table[0].flags = BP_PORT(bp);
6241         else
6242                 config->config_table[0].flags =
6243                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6244
6245         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6246            (set ? "setting" : "clearing"),
6247            config->config_table[0].msb_mac_addr,
6248            config->config_table[0].middle_mac_addr,
6249            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6250
6251         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6252                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6253                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6254 }
6255
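/* Ramrod completions arrive as CQEs handled by bnx2x_sp_event(), which
 * updates *state_p.  When interrupts are unavailable (poll != 0) the
 * caller must drive the Rx completion ring by hand, which is what the
 * bnx2x_rx_int() calls below do.
 */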
6256 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6257                              int *state_p, int poll)
6258 {
6259         /* can take a while if any port is running */
6260         int cnt = 500;
6261
6262         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6263            poll ? "polling" : "waiting", state, idx);
6264
6265         might_sleep();
6266         while (cnt--) {
6267                 if (poll) {
6268                         bnx2x_rx_int(bp->fp, 10);
6269                         /* if index is different from 0
6270                          * the reply for some commands will
6271                          * be on the non default queue
6272                          */
6273                         if (idx)
6274                                 bnx2x_rx_int(&bp->fp[idx], 10);
6275                 }
6276
6277                 mb(); /* state is changed by bnx2x_sp_event() */
6278                 if (*state_p == state)
6279                         return 0;
6280
6281                 msleep(1);
6282         }
6283
6284         /* timeout! */
6285         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6286                   poll ? "polling" : "waiting", state, idx);
6287 #ifdef BNX2X_STOP_ON_ERROR
6288         bnx2x_panic();
6289 #endif
6290
6291         return -EBUSY;
6292 }
6293
6294 static int bnx2x_setup_leading(struct bnx2x *bp)
6295 {
6296         int rc;
6297
6298         /* reset IGU state */
6299         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6300
6301         /* SETUP ramrod */
6302         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6303
6304         /* Wait for completion */
6305         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6306
6307         return rc;
6308 }
6309
6310 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6311 {
6312         /* reset IGU state */
6313         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6314
6315         /* SETUP ramrod */
6316         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6317         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6318
6319         /* Wait for completion */
6320         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6321                                  &(bp->fp[index].state), 0);
6322 }
6323
6324 static int bnx2x_poll(struct napi_struct *napi, int budget);
6325 static void bnx2x_set_rx_mode(struct net_device *dev);
6326
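/* Bring-up sequence: pick the number of queues, allocate memory, request
 * IRQs, negotiate the load type with the MCP (or emulate it with
 * load_count[] when there is no MCP), init the HW, then open the leading
 * and any non-default connections via ramrods.
 */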
6327 /* must be called with rtnl_lock */
6328 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6329 {
6330         u32 load_code;
6331         int i, rc = 0;
6332 #ifdef BNX2X_STOP_ON_ERROR
6333         if (unlikely(bp->panic))
6334                 return -EPERM;
6335 #endif
6336
6337         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6338
6339         if (use_inta) {
6340                 bp->num_queues = 1;
6341
6342         } else {
6343                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6344                         /* user requested number */
6345                         bp->num_queues = use_multi;
6346
6347                 else if (use_multi)
6348                         bp->num_queues = min_t(u32, num_online_cpus(),
6349                                                BP_MAX_QUEUES(bp));
6350                 else
6351                         bp->num_queues = 1;
6352
6353                 DP(NETIF_MSG_IFUP,
6354                    "set number of queues to %d\n", bp->num_queues);
6355
6356                 /* if we can't use MSI-X we only need one fp,
6357                  * so try to enable MSI-X with the requested number of fp's
6358                  * and fall back to MSI or legacy INTx with one fp
6359                  */
6360                 rc = bnx2x_enable_msix(bp);
6361                 if (rc) {
6362                         /* failed to enable MSI-X */
6363                         bp->num_queues = 1;
6364                         if (use_multi)
6365                                 BNX2X_ERR("Multi requested but failed"
6366                                           " to enable MSI-X\n");
6367                 }
6368         }
6369
6370         if (bnx2x_alloc_mem(bp))
6371                 return -ENOMEM;
6372
6373         for_each_queue(bp, i)
6374                 bnx2x_fp(bp, i, disable_tpa) =
6375                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6376
6377         for_each_queue(bp, i)
6378                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6379                                bnx2x_poll, 128);
6380
6381 #ifdef BNX2X_STOP_ON_ERROR
6382         for_each_queue(bp, i) {
6383                 struct bnx2x_fastpath *fp = &bp->fp[i];
6384
6385                 fp->poll_no_work = 0;
6386                 fp->poll_calls = 0;
6387                 fp->poll_max_calls = 0;
6388                 fp->poll_complete = 0;
6389                 fp->poll_exit = 0;
6390         }
6391 #endif
6392         bnx2x_napi_enable(bp);
6393
6394         if (bp->flags & USING_MSIX_FLAG) {
6395                 rc = bnx2x_req_msix_irqs(bp);
6396                 if (rc) {
6397                         pci_disable_msix(bp->pdev);
6398                         goto load_error1;
6399                 }
6400                 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6401         } else {
6402                 bnx2x_ack_int(bp);
6403                 rc = bnx2x_req_irq(bp);
6404                 if (rc) {
6405                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6406                         goto load_error1;
6407                 }
6408         }
6409
6410         /* Send LOAD_REQUEST command to MCP
6411            Returns the type of LOAD command:
6412            if it is the first port to be initialized
6413            common blocks should be initialized, otherwise - not
6414         */
6415         if (!BP_NOMCP(bp)) {
6416                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6417                 if (!load_code) {
6418                         BNX2X_ERR("MCP response failure, aborting\n");
6419                         rc = -EBUSY;
6420                         goto load_error2;
6421                 }
6422                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6423                         rc = -EBUSY; /* other port in diagnostic mode */
6424                         goto load_error2;
6425                 }
6426
6427         } else {
6428                 int port = BP_PORT(bp);
6429
6430                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6431                    load_count[0], load_count[1], load_count[2]);
6432                 load_count[0]++;
6433                 load_count[1 + port]++;
6434                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6435                    load_count[0], load_count[1], load_count[2]);
6436                 if (load_count[0] == 1)
6437                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6438                 else if (load_count[1 + port] == 1)
6439                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6440                 else
6441                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6442         }
6443
6444         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6445             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6446                 bp->port.pmf = 1;
6447         else
6448                 bp->port.pmf = 0;
6449         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6450
6451         /* Initialize HW */
6452         rc = bnx2x_init_hw(bp, load_code);
6453         if (rc) {
6454                 BNX2X_ERR("HW init failed, aborting\n");
6455                 goto load_error2;
6456         }
6457
6458         /* Setup NIC internals and enable interrupts */
6459         bnx2x_nic_init(bp, load_code);
6460
6461         /* Send LOAD_DONE command to MCP */
6462         if (!BP_NOMCP(bp)) {
6463                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6464                 if (!load_code) {
6465                         BNX2X_ERR("MCP response failure, aborting\n");
6466                         rc = -EBUSY;
6467                         goto load_error3;
6468                 }
6469         }
6470
6471         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6472
6473         rc = bnx2x_setup_leading(bp);
6474         if (rc) {
6475                 BNX2X_ERR("Setup leading failed!\n");
6476                 goto load_error3;
6477         }
6478
6479         if (CHIP_IS_E1H(bp))
6480                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6481                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6482                         bp->state = BNX2X_STATE_DISABLED;
6483                 }
6484
6485         if (bp->state == BNX2X_STATE_OPEN)
6486                 for_each_nondefault_queue(bp, i) {
6487                         rc = bnx2x_setup_multi(bp, i);
6488                         if (rc)
6489                                 goto load_error3;
6490                 }
6491
6492         if (CHIP_IS_E1(bp))
6493                 bnx2x_set_mac_addr_e1(bp, 1);
6494         else
6495                 bnx2x_set_mac_addr_e1h(bp, 1);
6496
6497         if (bp->port.pmf)
6498                 bnx2x_initial_phy_init(bp);
6499
6500         /* Start fast path */
6501         switch (load_mode) {
6502         case LOAD_NORMAL:
6503                 /* Tx queue should only be re-enabled */
6504                 netif_wake_queue(bp->dev);
6505                 /* Initialize the receive filter. */
6506                 bnx2x_set_rx_mode(bp->dev);
6507                 break;
6508
6509         case LOAD_OPEN:
6510                 netif_start_queue(bp->dev);
6511                 /* Initialize the receive filter. */
6512                 bnx2x_set_rx_mode(bp->dev);
6513                 break;
6514
6515         case LOAD_DIAG:
6516                 /* Initialize the receive filter. */
6517                 bnx2x_set_rx_mode(bp->dev);
6518                 bp->state = BNX2X_STATE_DIAG;
6519                 break;
6520
6521         default:
6522                 break;
6523         }
6524
6525         if (!bp->port.pmf)
6526                 bnx2x__link_status_update(bp);
6527
6528         /* start the timer */
6529         mod_timer(&bp->timer, jiffies + bp->current_interval);
6530
6531
6532         return 0;
6533
6534 load_error3:
6535         bnx2x_int_disable_sync(bp, 1);
6536         if (!BP_NOMCP(bp)) {
6537                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6538                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6539         }
6540         bp->port.pmf = 0;
6541         /* Free SKBs, SGEs, TPA pool and driver internals */
6542         bnx2x_free_skbs(bp);
6543         for_each_queue(bp, i)
6544                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6545 load_error2:
6546         /* Release IRQs */
6547         bnx2x_free_irq(bp);
6548 load_error1:
6549         bnx2x_napi_disable(bp);
6550         for_each_queue(bp, i)
6551                 netif_napi_del(&bnx2x_fp(bp, i, napi));
6552         bnx2x_free_mem(bp);
6553
6554         /* TBD we really need to reset the chip
6555            if we want to recover from this */
6556         return rc;
6557 }
6558
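/* Tear down one non-leading connection: a HALT ramrod stops the client,
 * then a CFC_DEL ramrod removes its connection context.  Both are waited
 * for in polling mode since the IRQs have already been released.
 */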
6559 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6560 {
6561         int rc;
6562
6563         /* halt the connection */
6564         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6565         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
6566
6567         /* Wait for completion */
6568         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6569                                &(bp->fp[index].state), 1);
6570         if (rc) /* timeout */
6571                 return rc;
6572
6573         /* delete cfc entry */
6574         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6575
6576         /* Wait for completion */
6577         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6578                                &(bp->fp[index].state), 1);
6579         return rc;
6580 }
6581
6582 static int bnx2x_stop_leading(struct bnx2x *bp)
6583 {
6584         u16 dsb_sp_prod_idx;
6585         /* if the other port is handling traffic,
6586            this can take a lot of time */
6587         int cnt = 500;
6588         int rc;
6589
6590         might_sleep();
6591
6592         /* Send HALT ramrod */
6593         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6594         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6595
6596         /* Wait for completion */
6597         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6598                                &(bp->fp[0].state), 1);
6599         if (rc) /* timeout */
6600                 return rc;
6601
6602         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6603
6604         /* Send PORT_DELETE ramrod */
6605         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6606
6607         /* Wait for completion to arrive on default status block
6608            we are going to reset the chip anyway
6609            so there is not much to do if this times out
6610          */
6611         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6612                 if (!cnt) {
6613                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6614                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6615                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6616 #ifdef BNX2X_STOP_ON_ERROR
6617                         bnx2x_panic();
6618 #else
6619                         rc = -EBUSY;
6620 #endif
6621                         break;
6622                 }
6623                 cnt--;
6624                 msleep(1);
6625                 rmb(); /* Refresh the dsb_sp_prod */
6626         }
6627         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6628         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6629
6630         return rc;
6631 }
6632
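/* The reset path mirrors the init path at decreasing scope: function
 * (IGU config and ILT lines), then port (NIG/BRB/AEU), then common,
 * selected by the MCP's unload response code in bnx2x_reset_chip().
 */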
6633 static void bnx2x_reset_func(struct bnx2x *bp)
6634 {
6635         int port = BP_PORT(bp);
6636         int func = BP_FUNC(bp);
6637         int base, i;
6638
6639         /* Configure IGU */
6640         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6641         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6642
6643         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6644
6645         /* Clear ILT */
6646         base = FUNC_ILT_BASE(func);
6647         for (i = base; i < base + ILT_PER_FUNC; i++)
6648                 bnx2x_ilt_wr(bp, i, 0);
6649 }
6650
6651 static void bnx2x_reset_port(struct bnx2x *bp)
6652 {
6653         int port = BP_PORT(bp);
6654         u32 val;
6655
6656         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6657
6658         /* Do not rcv packets to BRB */
6659         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6660         /* Do not direct rcv packets that are not for MCP to the BRB */
6661         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6662                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6663
6664         /* Configure AEU */
6665         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6666
6667         msleep(100);
6668         /* Check for BRB port occupancy */
6669         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6670         if (val)
6671                 DP(NETIF_MSG_IFDOWN,
6672                    "BRB1 is not empty  %d blocks are occupied\n", val);
6673
6674         /* TODO: Close Doorbell port? */
6675 }
6676
6677 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6678 {
6679         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6680            BP_FUNC(bp), reset_code);
6681
6682         switch (reset_code) {
6683         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6684                 bnx2x_reset_port(bp);
6685                 bnx2x_reset_func(bp);
6686                 bnx2x_reset_common(bp);
6687                 break;
6688
6689         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6690                 bnx2x_reset_port(bp);
6691                 bnx2x_reset_func(bp);
6692                 break;
6693
6694         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6695                 bnx2x_reset_func(bp);
6696                 break;
6697
6698         default:
6699                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6700                 break;
6701         }
6702 }
6703
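/* Unload sequence: stop the Rx filters, timer and interrupts, drain the
 * Tx fastpath, clear the MAC CAM entries (or arm the WoL MAC-match
 * filters), close all connections via ramrods, then report the unload to
 * the MCP and reset the chip.  Note that on success the flow intentionally
 * falls through the unload_error label.
 */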
6704 /* must be called with rtnl_lock */
6705 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6706 {
6707         int port = BP_PORT(bp);
6708         u32 reset_code = 0;
6709         int i, cnt, rc;
6710
6711         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6712
6713         bp->rx_mode = BNX2X_RX_MODE_NONE;
6714         bnx2x_set_storm_rx_mode(bp);
6715
6716         bnx2x_netif_stop(bp, 1);
6717
6718         del_timer_sync(&bp->timer);
6719         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6720                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6721         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6722
6723         /* Release IRQs */
6724         bnx2x_free_irq(bp);
6725
6726         /* Wait until tx fast path tasks complete */
6727         for_each_queue(bp, i) {
6728                 struct bnx2x_fastpath *fp = &bp->fp[i];
6729
6730                 cnt = 1000;
6731                 smp_rmb();
6732                 while (bnx2x_has_tx_work(fp)) {
6733
6734                         bnx2x_tx_int(fp, 1000);
6735                         if (!cnt) {
6736                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6737                                           i);
6738 #ifdef BNX2X_STOP_ON_ERROR
6739                                 bnx2x_panic();
6740                                 return -EBUSY;
6741 #else
6742                                 break;
6743 #endif
6744                         }
6745                         cnt--;
6746                         msleep(1);
6747                         smp_rmb();
6748                 }
6749         }
6750         /* Give HW time to discard old tx messages */
6751         msleep(1);
6752
6753         if (CHIP_IS_E1(bp)) {
6754                 struct mac_configuration_cmd *config =
6755                                                 bnx2x_sp(bp, mcast_config);
6756
6757                 bnx2x_set_mac_addr_e1(bp, 0);
6758
6759                 for (i = 0; i < config->hdr.length_6b; i++)
6760                         CAM_INVALIDATE(config->config_table[i]);
6761
6762                 config->hdr.length_6b = i;
6763                 if (CHIP_REV_IS_SLOW(bp))
6764                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6765                 else
6766                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6767                 config->hdr.client_id = BP_CL_ID(bp);
6768                 config->hdr.reserved1 = 0;
6769
6770                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6771                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6772                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6773
6774         } else { /* E1H */
6775                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6776
6777                 bnx2x_set_mac_addr_e1h(bp, 0);
6778
6779                 for (i = 0; i < MC_HASH_SIZE; i++)
6780                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6781         }
6782
6783         if (unload_mode == UNLOAD_NORMAL)
6784                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6785
6786         else if (bp->flags & NO_WOL_FLAG) {
6787                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6788                 if (CHIP_IS_E1H(bp))
6789                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6790
6791         } else if (bp->wol) {
6792                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6793                 u8 *mac_addr = bp->dev->dev_addr;
6794                 u32 val;
6795                 /* The MAC address is written to entries 1-4 to
6796                    preserve entry 0, which is used by the PMF */
6797                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6798
6799                 val = (mac_addr[0] << 8) | mac_addr[1];
6800                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6801
6802                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6803                       (mac_addr[4] << 8) | mac_addr[5];
6804                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6805
6806                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6807
6808         } else
6809                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6810
6811         /* Close multi and leading connections;
6812            completions for ramrods are collected in a synchronous way */
6813         for_each_nondefault_queue(bp, i)
6814                 if (bnx2x_stop_multi(bp, i))
6815                         goto unload_error;
6816
6817         rc = bnx2x_stop_leading(bp);
6818         if (rc) {
6819                 BNX2X_ERR("Stop leading failed!\n");
6820 #ifdef BNX2X_STOP_ON_ERROR
6821                 return -EBUSY;
6822 #else
6823                 goto unload_error;
6824 #endif
6825         }
6826
6827 unload_error:
6828         if (!BP_NOMCP(bp))
6829                 reset_code = bnx2x_fw_command(bp, reset_code);
6830         else {
6831                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6832                    load_count[0], load_count[1], load_count[2]);
6833                 load_count[0]--;
6834                 load_count[1 + port]--;
6835                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6836                    load_count[0], load_count[1], load_count[2]);
6837                 if (load_count[0] == 0)
6838                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6839                 else if (load_count[1 + port] == 0)
6840                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6841                 else
6842                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6843         }
6844
6845         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6846             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6847                 bnx2x__link_reset(bp);
6848
6849         /* Reset the chip */
6850         bnx2x_reset_chip(bp, reset_code);
6851
6852         /* Report UNLOAD_DONE to MCP */
6853         if (!BP_NOMCP(bp))
6854                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6855         bp->port.pmf = 0;
6856
6857         /* Free SKBs, SGEs, TPA pool and driver internals */
6858         bnx2x_free_skbs(bp);
6859         for_each_queue(bp, i)
6860                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6861         for_each_queue(bp, i)
6862                 netif_napi_del(&bnx2x_fp(bp, i, napi));
6863         bnx2x_free_mem(bp);
6864
6865         bp->state = BNX2X_STATE_CLOSED;
6866
6867         netif_carrier_off(bp->dev);
6868
6869         return 0;
6870 }
6871
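/* Recovery worker: performs a full unload/load cycle under rtnl_lock;
 * a no-op when BNX2X_STOP_ON_ERROR is set so the failure state is
 * preserved for a debug dump.
 */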
6872 static void bnx2x_reset_task(struct work_struct *work)
6873 {
6874         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6875
6876 #ifdef BNX2X_STOP_ON_ERROR
6877         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6878                   " so reset not done to allow debug dump;\n"
6879          KERN_ERR " you will need to reboot when done\n");
6880         return;
6881 #endif
6882
6883         rtnl_lock();
6884
6885         if (!netif_running(bp->dev))
6886                 goto reset_task_exit;
6887
6888         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6889         bnx2x_nic_load(bp, LOAD_NORMAL);
6890
6891 reset_task_exit:
6892         rtnl_unlock();
6893 }
6894
6895 /* end of nic load/unload */
6896
6897 /* ethtool_ops */
6898
6899 /*
6900  * Init service functions
6901  */
6902
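/* A pre-boot (UNDI) driver may have left the chip initialized.  Detect
 * it by the doorbell CID offset it programs (0x7), run the MCP unload
 * handshake for both ports if needed, and hard-reset the device while
 * preserving the NIG port-swap strapping.
 */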
6903 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6904 {
6905         u32 val;
6906
6907         /* Check if there is any driver already loaded */
6908         val = REG_RD(bp, MISC_REG_UNPREPARED);
6909         if (val == 0x1) {
6910                 /* Check if it is the UNDI driver:
6911                  * UNDI initializes the normal doorbell CID offset to 0x7
6912                  */
6913                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6914                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6915                 if (val == 0x7) {
6916                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6917                         /* save our func */
6918                         int func = BP_FUNC(bp);
6919                         u32 swap_en;
6920                         u32 swap_val;
6921
6922                         /* clear the UNDI indication */
6923                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6924
6925                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6926
6927                         /* try to unload UNDI on port 0 */
6928                         bp->func = 0;
6929                         bp->fw_seq =
6930                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6931                                 DRV_MSG_SEQ_NUMBER_MASK);
6932                         reset_code = bnx2x_fw_command(bp, reset_code);
6933
6934                         /* if UNDI is loaded on the other port */
6935                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6936
6937                                 /* send "DONE" for previous unload */
6938                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6939
6940                                 /* unload UNDI on port 1 */
6941                                 bp->func = 1;
6942                                 bp->fw_seq =
6943                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6944                                         DRV_MSG_SEQ_NUMBER_MASK);
6945                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6946
6947                                 bnx2x_fw_command(bp, reset_code);
6948                         }
6949
6950                         /* now it's safe to release the lock */
6951                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6952
6953                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6954                                     HC_REG_CONFIG_0), 0x1000);
6955
6956                         /* close input traffic and wait for it to drain */
6957                         /* Do not rcv packets to BRB */
6958                         REG_WR(bp,
6959                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6960                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6961                         /* Do not direct rcv packets that are not for MCP to
6962                          * the BRB */
6963                         REG_WR(bp,
6964                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6965                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6966                         /* clear AEU */
6967                         REG_WR(bp,
6968                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6969                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6970                         msleep(10);
6971
6972                         /* save NIG port swap info */
6973                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6974                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6975                         /* reset device */
6976                         REG_WR(bp,
6977                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6978                                0xd3ffffff);
6979                         REG_WR(bp,
6980                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6981                                0x1403);
6982                         /* take the NIG out of reset and restore swap values */
6983                         REG_WR(bp,
6984                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6985                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6986                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6987                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6988
6989                         /* send unload done to the MCP */
6990                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6991
6992                         /* restore our func and fw_seq */
6993                         bp->func = func;
6994                         bp->fw_seq =
6995                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6996                                 DRV_MSG_SEQ_NUMBER_MASK);
6997
6998                 } else
6999                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7000         }
7001 }
7002
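/* Read chip-wide identification from the MISC block and shared memory:
 * chip id, flash size, shmem base (an out-of-range base marks the MCP
 * inactive), hw_config/board, bootcode version and WoL capability.
 */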
7003 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7004 {
7005         u32 val, val2, val3, val4, id;
7006         u16 pmc;
7007
7008         /* Get the chip revision id and number. */
7009         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7010         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7011         id = ((val & 0xffff) << 16);
7012         val = REG_RD(bp, MISC_REG_CHIP_REV);
7013         id |= ((val & 0xf) << 12);
7014         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7015         id |= ((val & 0xff) << 4);
7016         val = REG_RD(bp, MISC_REG_BOND_ID);
7017         id |= (val & 0xf);
7018         bp->common.chip_id = id;
7019         bp->link_params.chip_id = bp->common.chip_id;
7020         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7021
7022         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7023         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7024                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7025         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7026                        bp->common.flash_size, bp->common.flash_size);
7027
7028         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7029         bp->link_params.shmem_base = bp->common.shmem_base;
7030         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7031
7032         if (!bp->common.shmem_base ||
7033             (bp->common.shmem_base < 0xA0000) ||
7034             (bp->common.shmem_base >= 0xC0000)) {
7035                 BNX2X_DEV_INFO("MCP not active\n");
7036                 bp->flags |= NO_MCP_FLAG;
7037                 return;
7038         }
7039
7040         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7041         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7042                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7043                 BNX2X_ERR("BAD MCP validity signature\n");
7044
7045         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7046         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7047
7048         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
7049                        bp->common.hw_config, bp->common.board);
7050
7051         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7052                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7053                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7054
7055         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7056         bp->common.bc_ver = val;
7057         BNX2X_DEV_INFO("bc_ver %X\n", val);
7058         if (val < BNX2X_BC_VER) {
7059                 /* for now only warn;
7060                  * later we might need to enforce this */
7061                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7062                           " please upgrade BC\n", BNX2X_BC_VER, val);
7063         }
7064
7065         if (BP_E1HVN(bp) == 0) {
7066                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7067                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7068         } else {
7069                 /* no WOL capability for E1HVN != 0 */
7070                 bp->flags |= NO_WOL_FLAG;
7071         }
7072         BNX2X_DEV_INFO("%sWoL capable\n",
7073                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7074
7075         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7076         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7077         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7078         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7079
7080         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7081                val, val2, val3, val4);
7082 }
7083
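/* Derive the ethtool "supported" mask from the switch configuration
 * (1G SerDes vs 10G XGXS) and the external PHY type, then trim it by
 * the NVRAM speed capability mask.
 */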
7084 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7085                                                     u32 switch_cfg)
7086 {
7087         int port = BP_PORT(bp);
7088         u32 ext_phy_type;
7089
7090         switch (switch_cfg) {
7091         case SWITCH_CFG_1G:
7092                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7093
7094                 ext_phy_type =
7095                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7096                 switch (ext_phy_type) {
7097                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7098                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7099                                        ext_phy_type);
7100
7101                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7102                                                SUPPORTED_10baseT_Full |
7103                                                SUPPORTED_100baseT_Half |
7104                                                SUPPORTED_100baseT_Full |
7105                                                SUPPORTED_1000baseT_Full |
7106                                                SUPPORTED_2500baseX_Full |
7107                                                SUPPORTED_TP |
7108                                                SUPPORTED_FIBRE |
7109                                                SUPPORTED_Autoneg |
7110                                                SUPPORTED_Pause |
7111                                                SUPPORTED_Asym_Pause);
7112                         break;
7113
7114                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7115                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7116                                        ext_phy_type);
7117
7118                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7119                                                SUPPORTED_10baseT_Full |
7120                                                SUPPORTED_100baseT_Half |
7121                                                SUPPORTED_100baseT_Full |
7122                                                SUPPORTED_1000baseT_Full |
7123                                                SUPPORTED_TP |
7124                                                SUPPORTED_FIBRE |
7125                                                SUPPORTED_Autoneg |
7126                                                SUPPORTED_Pause |
7127                                                SUPPORTED_Asym_Pause);
7128                         break;
7129
7130                 default:
7131                         BNX2X_ERR("NVRAM config error. "
7132                                   "BAD SerDes ext_phy_config 0x%x\n",
7133                                   bp->link_params.ext_phy_config);
7134                         return;
7135                 }
7136
7137                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7138                                            port*0x10);
7139                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7140                 break;
7141
7142         case SWITCH_CFG_10G:
7143                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7144
7145                 ext_phy_type =
7146                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7147                 switch (ext_phy_type) {
7148                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7149                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7150                                        ext_phy_type);
7151
7152                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7153                                                SUPPORTED_10baseT_Full |
7154                                                SUPPORTED_100baseT_Half |
7155                                                SUPPORTED_100baseT_Full |
7156                                                SUPPORTED_1000baseT_Full |
7157                                                SUPPORTED_2500baseX_Full |
7158                                                SUPPORTED_10000baseT_Full |
7159                                                SUPPORTED_TP |
7160                                                SUPPORTED_FIBRE |
7161                                                SUPPORTED_Autoneg |
7162                                                SUPPORTED_Pause |
7163                                                SUPPORTED_Asym_Pause);
7164                         break;
7165
7166                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7167                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7168                                        ext_phy_type);
7169
7170                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7171                                                SUPPORTED_FIBRE |
7172                                                SUPPORTED_Pause |
7173                                                SUPPORTED_Asym_Pause);
7174                         break;
7175
7176                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7177                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7178                                        ext_phy_type);
7179
7180                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7181                                                SUPPORTED_1000baseT_Full |
7182                                                SUPPORTED_FIBRE |
7183                                                SUPPORTED_Pause |
7184                                                SUPPORTED_Asym_Pause);
7185                         break;
7186
7187                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7188                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7189                                        ext_phy_type);
7190
7191                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7192                                                SUPPORTED_1000baseT_Full |
7193                                                SUPPORTED_FIBRE |
7194                                                SUPPORTED_Autoneg |
7195                                                SUPPORTED_Pause |
7196                                                SUPPORTED_Asym_Pause);
7197                         break;
7198
7199                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7200                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7201                                        ext_phy_type);
7202
7203                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7204                                                SUPPORTED_2500baseX_Full |
7205                                                SUPPORTED_1000baseT_Full |
7206                                                SUPPORTED_FIBRE |
7207                                                SUPPORTED_Autoneg |
7208                                                SUPPORTED_Pause |
7209                                                SUPPORTED_Asym_Pause);
7210                         break;
7211
7212                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7213                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7214                                        ext_phy_type);
7215
7216                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7217                                                SUPPORTED_TP |
7218                                                SUPPORTED_Autoneg |
7219                                                SUPPORTED_Pause |
7220                                                SUPPORTED_Asym_Pause);
7221                         break;
7222
7223                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7224                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7225                                   bp->link_params.ext_phy_config);
7226                         break;
7227
7228                 default:
7229                         BNX2X_ERR("NVRAM config error. "
7230                                   "BAD XGXS ext_phy_config 0x%x\n",
7231                                   bp->link_params.ext_phy_config);
7232                         return;
7233                 }
7234
7235                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7236                                            port*0x18);
7237                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7238
7239                 break;
7240
7241         default:
7242                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7243                           bp->port.link_config);
7244                 return;
7245         }
7246         bp->link_params.phy_addr = bp->port.phy_addr;
7247
7248         /* mask what we support according to speed_cap_mask */
7249         if (!(bp->link_params.speed_cap_mask &
7250                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7251                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7252
7253         if (!(bp->link_params.speed_cap_mask &
7254                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7255                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7256
7257         if (!(bp->link_params.speed_cap_mask &
7258                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7259                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7260
7261         if (!(bp->link_params.speed_cap_mask &
7262                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7263                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7264
7265         if (!(bp->link_params.speed_cap_mask &
7266                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7267                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7268                                         SUPPORTED_1000baseT_Full);
7269
7270         if (!(bp->link_params.speed_cap_mask &
7271                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7272                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7273
7274         if (!(bp->link_params.speed_cap_mask &
7275                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7276                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7277
7278         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7279 }
7280
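/* Translate the NVRAM link_config into the requested speed, duplex,
 * flow control and advertised mode mask, falling back to autoneg over
 * everything supported on a bad configuration.
 */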
7281 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7282 {
7283         bp->link_params.req_duplex = DUPLEX_FULL;
7284
7285         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7286         case PORT_FEATURE_LINK_SPEED_AUTO:
7287                 if (bp->port.supported & SUPPORTED_Autoneg) {
7288                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7289                         bp->port.advertising = bp->port.supported;
7290                 } else {
7291                         u32 ext_phy_type =
7292                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7293
7294                         if ((ext_phy_type ==
7295                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7296                             (ext_phy_type ==
7297                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7298                                 /* force 10G, no AN */
7299                                 bp->link_params.req_line_speed = SPEED_10000;
7300                                 bp->port.advertising =
7301                                                 (ADVERTISED_10000baseT_Full |
7302                                                  ADVERTISED_FIBRE);
7303                                 break;
7304                         }
7305                         BNX2X_ERR("NVRAM config error. "
7306                                   "Invalid link_config 0x%x"
7307                                   "  Autoneg not supported\n",
7308                                   bp->port.link_config);
7309                         return;
7310                 }
7311                 break;
7312
7313         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7314                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7315                         bp->link_params.req_line_speed = SPEED_10;
7316                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7317                                                 ADVERTISED_TP);
7318                 } else {
7319                         BNX2X_ERR("NVRAM config error. "
7320                                   "Invalid link_config 0x%x"
7321                                   "  speed_cap_mask 0x%x\n",
7322                                   bp->port.link_config,
7323                                   bp->link_params.speed_cap_mask);
7324                         return;
7325                 }
7326                 break;
7327
7328         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7329                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7330                         bp->link_params.req_line_speed = SPEED_10;
7331                         bp->link_params.req_duplex = DUPLEX_HALF;
7332                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7333                                                 ADVERTISED_TP);
7334                 } else {
7335                         BNX2X_ERR("NVRAM config error. "
7336                                   "Invalid link_config 0x%x"
7337                                   "  speed_cap_mask 0x%x\n",
7338                                   bp->port.link_config,
7339                                   bp->link_params.speed_cap_mask);
7340                         return;
7341                 }
7342                 break;
7343
7344         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7345                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7346                         bp->link_params.req_line_speed = SPEED_100;
7347                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7348                                                 ADVERTISED_TP);
7349                 } else {
7350                         BNX2X_ERR("NVRAM config error. "
7351                                   "Invalid link_config 0x%x"
7352                                   "  speed_cap_mask 0x%x\n",
7353                                   bp->port.link_config,
7354                                   bp->link_params.speed_cap_mask);
7355                         return;
7356                 }
7357                 break;
7358
7359         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7360                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7361                         bp->link_params.req_line_speed = SPEED_100;
7362                         bp->link_params.req_duplex = DUPLEX_HALF;
7363                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7364                                                 ADVERTISED_TP);
7365                 } else {
7366                         BNX2X_ERR("NVRAM config error. "
7367                                   "Invalid link_config 0x%x"
7368                                   "  speed_cap_mask 0x%x\n",
7369                                   bp->port.link_config,
7370                                   bp->link_params.speed_cap_mask);
7371                         return;
7372                 }
7373                 break;
7374
7375         case PORT_FEATURE_LINK_SPEED_1G:
7376                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7377                         bp->link_params.req_line_speed = SPEED_1000;
7378                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7379                                                 ADVERTISED_TP);
7380                 } else {
7381                         BNX2X_ERR("NVRAM config error. "
7382                                   "Invalid link_config 0x%x"
7383                                   "  speed_cap_mask 0x%x\n",
7384                                   bp->port.link_config,
7385                                   bp->link_params.speed_cap_mask);
7386                         return;
7387                 }
7388                 break;
7389
7390         case PORT_FEATURE_LINK_SPEED_2_5G:
7391                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7392                         bp->link_params.req_line_speed = SPEED_2500;
7393                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7394                                                 ADVERTISED_TP);
7395                 } else {
7396                         BNX2X_ERR("NVRAM config error. "
7397                                   "Invalid link_config 0x%x"
7398                                   "  speed_cap_mask 0x%x\n",
7399                                   bp->port.link_config,
7400                                   bp->link_params.speed_cap_mask);
7401                         return;
7402                 }
7403                 break;
7404
7405         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7406         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7407         case PORT_FEATURE_LINK_SPEED_10G_KR:
7408                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7409                         bp->link_params.req_line_speed = SPEED_10000;
7410                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7411                                                 ADVERTISED_FIBRE);
7412                 } else {
7413                         BNX2X_ERR("NVRAM config error. "
7414                                   "Invalid link_config 0x%x"
7415                                   "  speed_cap_mask 0x%x\n",
7416                                   bp->port.link_config,
7417                                   bp->link_params.speed_cap_mask);
7418                         return;
7419                 }
7420                 break;
7421
7422         default:
7423                 BNX2X_ERR("NVRAM config error. "
7424                           "BAD link speed link_config 0x%x\n",
7425                           bp->port.link_config);
7426                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7427                 bp->port.advertising = bp->port.supported;
7428                 break;
7429         }
7430
7431         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7432                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7433         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7434             !(bp->port.supported & SUPPORTED_Autoneg))
7435                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7436
7437         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7438                        "  advertising 0x%x\n",
7439                        bp->link_params.req_line_speed,
7440                        bp->link_params.req_duplex,
7441                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7442 }
7443
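/* Gather per-port configuration from shared memory: SerDes/lane/PHY
 * setup, speed capabilities, link_config and the port MAC address.
 */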
7444 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7445 {
7446         int port = BP_PORT(bp);
7447         u32 val, val2;
7448
7449         bp->link_params.bp = bp;
7450         bp->link_params.port = port;
7451
7452         bp->link_params.serdes_config =
7453                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7454         bp->link_params.lane_config =
7455                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7456         bp->link_params.ext_phy_config =
7457                 SHMEM_RD(bp,
7458                          dev_info.port_hw_config[port].external_phy_config);
7459         bp->link_params.speed_cap_mask =
7460                 SHMEM_RD(bp,
7461                          dev_info.port_hw_config[port].speed_capability_mask);
7462
7463         bp->port.link_config =
7464                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7465
7466         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7467              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7468                        "  link_config 0x%08x\n",
7469                        bp->link_params.serdes_config,
7470                        bp->link_params.lane_config,
7471                        bp->link_params.ext_phy_config,
7472                        bp->link_params.speed_cap_mask, bp->port.link_config);
7473
7474         bp->link_params.switch_cfg = (bp->port.link_config &
7475                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7476         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7477
7478         bnx2x_link_settings_requested(bp);
7479
7480         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7481         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7482         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7483         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7484         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7485         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7486         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7487         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7488         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7489         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7490 }
7491
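/* Per-function hw info: common info first, then E1H multi-function
 * detection via the e1hov tag, port info and the firmware sequence
 * number; in MF mode the MAC comes from the function's mf_cfg entry.
 */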
7492 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7493 {
7494         int func = BP_FUNC(bp);
7495         u32 val, val2;
7496         int rc = 0;
7497
7498         bnx2x_get_common_hwinfo(bp);
7499
7500         bp->e1hov = 0;
7501         bp->e1hmf = 0;
7502         if (CHIP_IS_E1H(bp)) {
7503                 bp->mf_config =
7504                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7505
7506                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7507                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7508                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7509
7510                         bp->e1hov = val;
7511                         bp->e1hmf = 1;
7512                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7513                                        "(0x%04x)\n",
7514                                        func, bp->e1hov, bp->e1hov);
7515                 } else {
7516                         BNX2X_DEV_INFO("Single function mode\n");
7517                         if (BP_E1HVN(bp)) {
7518                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7519                                           "  aborting\n", func);
7520                                 rc = -EPERM;
7521                         }
7522                 }
7523         }
7524
7525         if (!BP_NOMCP(bp)) {
7526                 bnx2x_get_port_hwinfo(bp);
7527
7528                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7529                               DRV_MSG_SEQ_NUMBER_MASK);
7530                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7531         }
7532
7533         if (IS_E1HMF(bp)) {
7534                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7535                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7536                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7537                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7538                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7539                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7540                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7541                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7542                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7543                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7544                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7545                                ETH_ALEN);
7546                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7547                                ETH_ALEN);
7548                 }
7549
7550                 return rc;
7551         }
7552
7553         if (BP_NOMCP(bp)) {
7554                 /* only supposed to happen on emulation/FPGA */
7555                 BNX2X_ERR("warning: random MAC workaround active\n");
7556                 random_ether_addr(bp->dev->dev_addr);
7557                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7558         }
7559
7560         return rc;
7561 }
7562
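/* One-time driver state init: locks, work items, hw info, UNDI
 * cleanup, TPA flags, ring sizes, coalescing defaults and the periodic
 * timer.
 */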
7563 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7564 {
7565         int func = BP_FUNC(bp);
7566         int rc;
7567
7568         /* Disable interrupt handling until HW is initialized */
7569         atomic_set(&bp->intr_sem, 1);
7570
7571         mutex_init(&bp->port.phy_mutex);
7572
7573         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7574         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7575
7576         rc = bnx2x_get_hwinfo(bp);
7577
7578         /* need to reset chip if undi was active */
7579         if (!BP_NOMCP(bp))
7580                 bnx2x_undi_unload(bp);
7581
7582         if (CHIP_REV_IS_FPGA(bp))
7583                 printk(KERN_ERR PFX "FPGA detected\n");
7584
7585         if (BP_NOMCP(bp) && (func == 0))
7586                 printk(KERN_ERR PFX
7587                        "MCP disabled, must load devices in order!\n");
7588
7589         /* Set TPA flags */
7590         if (disable_tpa) {
7591                 bp->flags &= ~TPA_ENABLE_FLAG;
7592                 bp->dev->features &= ~NETIF_F_LRO;
7593         } else {
7594                 bp->flags |= TPA_ENABLE_FLAG;
7595                 bp->dev->features |= NETIF_F_LRO;
7596         }
7597
7598
7599         bp->tx_ring_size = MAX_TX_AVAIL;
7600         bp->rx_ring_size = MAX_RX_AVAIL;
7601
7602         bp->rx_csum = 1;
7603         bp->rx_offset = 0;
7604
7605         bp->tx_ticks = 50;
7606         bp->rx_ticks = 25;
7607
7608         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7609         bp->current_interval = (poll ? poll : bp->timer_interval);
7610
7611         init_timer(&bp->timer);
7612         bp->timer.expires = jiffies + bp->current_interval;
7613         bp->timer.data = (unsigned long) bp;
7614         bp->timer.function = bnx2x_timer;
7615
7616         return rc;
7617 }
7618
7619 /*
7620  * ethtool service functions
7621  */
7622
7623 /* All ethtool functions called with rtnl_lock */
7624
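/* Report link settings: live values when carrier is up, requested
 * values otherwise; in E1HMF mode the reported speed is capped by the
 * function's max-bandwidth share.
 */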
7625 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7626 {
7627         struct bnx2x *bp = netdev_priv(dev);
7628
7629         cmd->supported = bp->port.supported;
7630         cmd->advertising = bp->port.advertising;
7631
7632         if (netif_carrier_ok(dev)) {
7633                 cmd->speed = bp->link_vars.line_speed;
7634                 cmd->duplex = bp->link_vars.duplex;
7635         } else {
7636                 cmd->speed = bp->link_params.req_line_speed;
7637                 cmd->duplex = bp->link_params.req_duplex;
7638         }
7639         if (IS_E1HMF(bp)) {
7640                 u16 vn_max_rate;
7641
7642                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7643                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7644                 if (vn_max_rate < cmd->speed)
7645                         cmd->speed = vn_max_rate;
7646         }
7647
7648         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7649                 u32 ext_phy_type =
7650                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7651
7652                 switch (ext_phy_type) {
7653                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7654                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7655                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7656                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7657                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7658                         cmd->port = PORT_FIBRE;
7659                         break;
7660
7661                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7662                         cmd->port = PORT_TP;
7663                         break;
7664
7665                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7666                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7667                                   bp->link_params.ext_phy_config);
7668                         break;
7669
7670                 default:
7671                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7672                            bp->link_params.ext_phy_config);
7673                         break;
7674                 }
7675         } else
7676                 cmd->port = PORT_TP;
7677
7678         cmd->phy_address = bp->port.phy_addr;
7679         cmd->transceiver = XCVR_INTERNAL;
7680
7681         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7682                 cmd->autoneg = AUTONEG_ENABLE;
7683         else
7684                 cmd->autoneg = AUTONEG_DISABLE;
7685
7686         cmd->maxtxpkt = 0;
7687         cmd->maxrxpkt = 0;
7688
7689         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7690            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7691            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7692            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7693            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7694            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7695            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7696
7697         return 0;
7698 }
7699
7700 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7701 {
7702         struct bnx2x *bp = netdev_priv(dev);
7703         u32 advertising;
7704
7705         if (IS_E1HMF(bp))
7706                 return 0;
7707
7708         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7709            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7710            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7711            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7712            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7713            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7714            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7715
7716         if (cmd->autoneg == AUTONEG_ENABLE) {
7717                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7718                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7719                         return -EINVAL;
7720                 }
7721
7722                 /* advertise the requested speed and duplex if supported */
7723                 cmd->advertising &= bp->port.supported;
7724
7725                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7726                 bp->link_params.req_duplex = DUPLEX_FULL;
7727                 bp->port.advertising |= (ADVERTISED_Autoneg |
7728                                          cmd->advertising);
7729
7730         } else { /* forced speed */
7731                 /* advertise the requested speed and duplex if supported */
7732                 switch (cmd->speed) {
7733                 case SPEED_10:
7734                         if (cmd->duplex == DUPLEX_FULL) {
7735                                 if (!(bp->port.supported &
7736                                       SUPPORTED_10baseT_Full)) {
7737                                         DP(NETIF_MSG_LINK,
7738                                            "10M full not supported\n");
7739                                         return -EINVAL;
7740                                 }
7741
7742                                 advertising = (ADVERTISED_10baseT_Full |
7743                                                ADVERTISED_TP);
7744                         } else {
7745                                 if (!(bp->port.supported &
7746                                       SUPPORTED_10baseT_Half)) {
7747                                         DP(NETIF_MSG_LINK,
7748                                            "10M half not supported\n");
7749                                         return -EINVAL;
7750                                 }
7751
7752                                 advertising = (ADVERTISED_10baseT_Half |
7753                                                ADVERTISED_TP);
7754                         }
7755                         break;
7756
7757                 case SPEED_100:
7758                         if (cmd->duplex == DUPLEX_FULL) {
7759                                 if (!(bp->port.supported &
7760                                                 SUPPORTED_100baseT_Full)) {
7761                                         DP(NETIF_MSG_LINK,
7762                                            "100M full not supported\n");
7763                                         return -EINVAL;
7764                                 }
7765
7766                                 advertising = (ADVERTISED_100baseT_Full |
7767                                                ADVERTISED_TP);
7768                         } else {
7769                                 if (!(bp->port.supported &
7770                                                 SUPPORTED_100baseT_Half)) {
7771                                         DP(NETIF_MSG_LINK,
7772                                            "100M half not supported\n");
7773                                         return -EINVAL;
7774                                 }
7775
7776                                 advertising = (ADVERTISED_100baseT_Half |
7777                                                ADVERTISED_TP);
7778                         }
7779                         break;
7780
7781                 case SPEED_1000:
7782                         if (cmd->duplex != DUPLEX_FULL) {
7783                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7784                                 return -EINVAL;
7785                         }
7786
7787                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7788                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7789                                 return -EINVAL;
7790                         }
7791
7792                         advertising = (ADVERTISED_1000baseT_Full |
7793                                        ADVERTISED_TP);
7794                         break;
7795
7796                 case SPEED_2500:
7797                         if (cmd->duplex != DUPLEX_FULL) {
7798                                 DP(NETIF_MSG_LINK,
7799                                    "2.5G half not supported\n");
7800                                 return -EINVAL;
7801                         }
7802
7803                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7804                                 DP(NETIF_MSG_LINK,
7805                                    "2.5G full not supported\n");
7806                                 return -EINVAL;
7807                         }
7808
7809                         advertising = (ADVERTISED_2500baseX_Full |
7810                                        ADVERTISED_TP);
7811                         break;
7812
7813                 case SPEED_10000:
7814                         if (cmd->duplex != DUPLEX_FULL) {
7815                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7816                                 return -EINVAL;
7817                         }
7818
7819                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7820                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7821                                 return -EINVAL;
7822                         }
7823
7824                         advertising = (ADVERTISED_10000baseT_Full |
7825                                        ADVERTISED_FIBRE);
7826                         break;
7827
7828                 default:
7829                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7830                         return -EINVAL;
7831                 }
7832
7833                 bp->link_params.req_line_speed = cmd->speed;
7834                 bp->link_params.req_duplex = cmd->duplex;
7835                 bp->port.advertising = advertising;
7836         }
7837
7838         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7839            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7840            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7841            bp->port.advertising);
7842
7843         if (netif_running(dev)) {
7844                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7845                 bnx2x_link_set(bp);
7846         }
7847
7848         return 0;
7849 }
7850
7851 #define PHY_FW_VER_LEN                  10
7852
7853 static void bnx2x_get_drvinfo(struct net_device *dev,
7854                               struct ethtool_drvinfo *info)
7855 {
7856         struct bnx2x *bp = netdev_priv(dev);
7857         u8 phy_fw_ver[PHY_FW_VER_LEN];
7858
7859         strcpy(info->driver, DRV_MODULE_NAME);
7860         strcpy(info->version, DRV_MODULE_VERSION);
7861
7862         phy_fw_ver[0] = '\0';
7863         if (bp->port.pmf) {
7864                 bnx2x_acquire_phy_lock(bp);
7865                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7866                                              (bp->state != BNX2X_STATE_CLOSED),
7867                                              phy_fw_ver, PHY_FW_VER_LEN);
7868                 bnx2x_release_phy_lock(bp);
7869         }
7870
7871         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7872                  (bp->common.bc_ver & 0xff0000) >> 16,
7873                  (bp->common.bc_ver & 0xff00) >> 8,
7874                  (bp->common.bc_ver & 0xff),
7875                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7876         strcpy(info->bus_info, pci_name(bp->pdev));
7877         info->n_stats = BNX2X_NUM_STATS;
7878         info->testinfo_len = BNX2X_NUM_TESTS;
7879         info->eedump_len = bp->common.flash_size;
7880         info->regdump_len = 0;
7881 }
7882
7883 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7884 {
7885         struct bnx2x *bp = netdev_priv(dev);
7886
7887         if (bp->flags & NO_WOL_FLAG) {
7888                 wol->supported = 0;
7889                 wol->wolopts = 0;
7890         } else {
7891                 wol->supported = WAKE_MAGIC;
7892                 if (bp->wol)
7893                         wol->wolopts = WAKE_MAGIC;
7894                 else
7895                         wol->wolopts = 0;
7896         }
7897         memset(&wol->sopass, 0, sizeof(wol->sopass));
7898 }
7899
7900 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7901 {
7902         struct bnx2x *bp = netdev_priv(dev);
7903
7904         if (wol->wolopts & ~WAKE_MAGIC)
7905                 return -EINVAL;
7906
7907         if (wol->wolopts & WAKE_MAGIC) {
7908                 if (bp->flags & NO_WOL_FLAG)
7909                         return -EINVAL;
7910
7911                 bp->wol = 1;
7912         } else
7913                 bp->wol = 0;
7914
7915         return 0;
7916 }
7917
7918 static u32 bnx2x_get_msglevel(struct net_device *dev)
7919 {
7920         struct bnx2x *bp = netdev_priv(dev);
7921
7922         return bp->msglevel;
7923 }
7924
7925 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7926 {
7927         struct bnx2x *bp = netdev_priv(dev);
7928
7929         if (capable(CAP_NET_ADMIN))
7930                 bp->msglevel = level;
7931 }
7932
7933 static int bnx2x_nway_reset(struct net_device *dev)
7934 {
7935         struct bnx2x *bp = netdev_priv(dev);
7936
7937         if (!bp->port.pmf)
7938                 return 0;
7939
7940         if (netif_running(dev)) {
7941                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7942                 bnx2x_link_set(bp);
7943         }
7944
7945         return 0;
7946 }
7947
7948 static int bnx2x_get_eeprom_len(struct net_device *dev)
7949 {
7950         struct bnx2x *bp = netdev_priv(dev);
7951
7952         return bp->common.flash_size;
7953 }
7954
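/* NVRAM access is arbitrated per port through MCP_REG_MCPR_NVM_SW_ARB:
 * set/clear the per-port request bit and poll for the grant, with the
 * timeout stretched 100x on emulation/FPGA.
 */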
7955 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7956 {
7957         int port = BP_PORT(bp);
7958         int count, i;
7959         u32 val = 0;
7960
7961         /* adjust timeout for emulation/FPGA */
7962         count = NVRAM_TIMEOUT_COUNT;
7963         if (CHIP_REV_IS_SLOW(bp))
7964                 count *= 100;
7965
7966         /* request access to nvram interface */
7967         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7968                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7969
7970         for (i = 0; i < count*10; i++) {
7971                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7972                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7973                         break;
7974
7975                 udelay(5);
7976         }
7977
7978         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7979                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7980                 return -EBUSY;
7981         }
7982
7983         return 0;
7984 }
7985
7986 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7987 {
7988         int port = BP_PORT(bp);
7989         int count, i;
7990         u32 val = 0;
7991
7992         /* adjust timeout for emulation/FPGA */
7993         count = NVRAM_TIMEOUT_COUNT;
7994         if (CHIP_REV_IS_SLOW(bp))
7995                 count *= 100;
7996
7997         /* relinquish nvram interface */
7998         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7999                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8000
8001         for (i = 0; i < count*10; i++) {
8002                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8003                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8004                         break;
8005
8006                 udelay(5);
8007         }
8008
8009         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8010                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8011                 return -EBUSY;
8012         }
8013
8014         return 0;
8015 }
8016
8017 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8018 {
8019         u32 val;
8020
8021         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8022
8023         /* enable both bits, even on read */
8024         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8025                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8026                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8027 }
8028
8029 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8030 {
8031         u32 val;
8032
8033         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8034
8035         /* disable both bits, even after read */
8036         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8037                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8038                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8039 }
8040
8041 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8042                                   u32 cmd_flags)
8043 {
8044         int count, i, rc;
8045         u32 val;
8046
8047         /* build the command word */
8048         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8049
8050         /* need to clear DONE bit separately */
8051         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8052
8053         /* address of the NVRAM to read from */
8054         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8055                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8056
8057         /* issue a read command */
8058         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8059
8060         /* adjust timeout for emulation/FPGA */
8061         count = NVRAM_TIMEOUT_COUNT;
8062         if (CHIP_REV_IS_SLOW(bp))
8063                 count *= 100;
8064
8065         /* wait for completion */
8066         *ret_val = 0;
8067         rc = -EBUSY;
8068         for (i = 0; i < count; i++) {
8069                 udelay(5);
8070                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8071
8072                 if (val & MCPR_NVM_COMMAND_DONE) {
8073                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8074                         /* we read nvram data in cpu order,
8075                          * but ethtool sees it as an array of bytes;
8076                          * converting to big-endian does the work */
8077                         val = cpu_to_be32(val);
8078                         *ret_val = val;
8079                         rc = 0;
8080                         break;
8081                 }
8082         }
8083
8084         return rc;
8085 }
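
The cpu_to_be32() at the end is what makes the returned dword look like raw flash bytes to ethtool: after the swap, the byte at the lowest buffer address corresponds to the lowest flash address on any host. A quick user-space check of that property, using htonl() as a portable stand-in for cpu_to_be32() and assuming, as the comment above implies, that the most significant byte of the register value holds the lowest-addressed flash byte:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* pretend the NVRAM returned 0x11223344, i.e. flash bytes
         * 0x11 0x22 0x33 0x44 at increasing addresses (assumption) */
        uint32_t cpu_val = 0x11223344;
        uint32_t be_val = htonl(cpu_val);       /* ~ cpu_to_be32() */
        uint8_t bytes[4];

        memcpy(bytes, &be_val, 4);
        assert(bytes[0] == 0x11 && bytes[3] == 0x44);
        return 0;
}
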
8086
8087 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8088                             int buf_size)
8089 {
8090         int rc;
8091         u32 cmd_flags;
8092         u32 val;
8093
8094         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8095                 DP(BNX2X_MSG_NVM,
8096                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8097                    offset, buf_size);
8098                 return -EINVAL;
8099         }
8100
8101         if (offset + buf_size > bp->common.flash_size) {
8102                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8103                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8104                    offset, buf_size, bp->common.flash_size);
8105                 return -EINVAL;
8106         }
8107
8108         /* request access to nvram interface */
8109         rc = bnx2x_acquire_nvram_lock(bp);
8110         if (rc)
8111                 return rc;
8112
8113         /* enable access to nvram interface */
8114         bnx2x_enable_nvram_access(bp);
8115
8116         /* read the first word(s) */
8117         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8118         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8119                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8120                 memcpy(ret_buf, &val, 4);
8121
8122                 /* advance to the next dword */
8123                 offset += sizeof(u32);
8124                 ret_buf += sizeof(u32);
8125                 buf_size -= sizeof(u32);
8126                 cmd_flags = 0;
8127         }
8128
8129         if (rc == 0) {
8130                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8131                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8132                 memcpy(ret_buf, &val, 4);
8133         }
8134
8135         /* disable access to nvram interface */
8136         bnx2x_disable_nvram_access(bp);
8137         bnx2x_release_nvram_lock(bp);
8138
8139         return rc;
8140 }
8141
8142 static int bnx2x_get_eeprom(struct net_device *dev,
8143                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8144 {
8145         struct bnx2x *bp = netdev_priv(dev);
8146         int rc;
8147
8148         if (!netif_running(dev))
8149                 return -EAGAIN;
8150
8151         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8152            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8153            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8154            eeprom->len, eeprom->len);
8155
8156         /* parameters already validated in ethtool_get_eeprom */
8157
8158         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8159
8160         return rc;
8161 }
8162
8163 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8164                                    u32 cmd_flags)
8165 {
8166         int count, i, rc;
8167
8168         /* build the command word */
8169         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8170
8171         /* need to clear DONE bit separately */
8172         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8173
8174         /* write the data */
8175         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8176
8177         /* address of the NVRAM to write to */
8178         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8179                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8180
8181         /* issue the write command */
8182         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8183
8184         /* adjust timeout for emulation/FPGA */
8185         count = NVRAM_TIMEOUT_COUNT;
8186         if (CHIP_REV_IS_SLOW(bp))
8187                 count *= 100;
8188
8189         /* wait for completion */
8190         rc = -EBUSY;
8191         for (i = 0; i < count; i++) {
8192                 udelay(5);
8193                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8194                 if (val & MCPR_NVM_COMMAND_DONE) {
8195                         rc = 0;
8196                         break;
8197                 }
8198         }
8199
8200         return rc;
8201 }
8202
8203 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8204
8205 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8206                               int buf_size)
8207 {
8208         int rc;
8209         u32 cmd_flags;
8210         u32 align_offset;
8211         u32 val;
8212
8213         if (offset + buf_size > bp->common.flash_size) {
8214                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8215                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8216                    offset, buf_size, bp->common.flash_size);
8217                 return -EINVAL;
8218         }
8219
8220         /* request access to nvram interface */
8221         rc = bnx2x_acquire_nvram_lock(bp);
8222         if (rc)
8223                 return rc;
8224
8225         /* enable access to nvram interface */
8226         bnx2x_enable_nvram_access(bp);
8227
8228         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8229         align_offset = (offset & ~0x03);
8230         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8231
8232         if (rc == 0) {
8233                 val &= ~(0xff << BYTE_OFFSET(offset));
8234                 val |= (*data_buf << BYTE_OFFSET(offset));
8235
8236                 /* nvram data is returned as an array of bytes
8237                  * convert it back to cpu order */
8238                 val = be32_to_cpu(val);
8239
8240                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8241                                              cmd_flags);
8242         }
8243
8244         /* disable access to nvram interface */
8245         bnx2x_disable_nvram_access(bp);
8246         bnx2x_release_nvram_lock(bp);
8247
8248         return rc;
8249 }
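
bnx2x_nvram_write1() is a read-modify-write: load the aligned dword, clear the eight bits selected by BYTE_OFFSET(), i.e. 8 * (offset & 3), and splice in the new byte. The core arithmetic, isolated as a user-space sketch:

#include <assert.h>
#include <stdint.h>

#define BYTE_OFFSET(offset)     (8 * ((offset) & 0x03))

/* Replace the byte at 'offset' inside its naturally aligned dword. */
static uint32_t splice_byte(uint32_t dword, uint32_t offset, uint8_t b)
{
        dword &= ~(0xffu << BYTE_OFFSET(offset));      /* clear old byte */
        dword |= ((uint32_t)b << BYTE_OFFSET(offset)); /* insert new one */
        return dword;
}

int main(void)
{
        /* write 0xab at offset 6 -> byte lane 2 of the dword at 4 */
        assert(splice_byte(0x11223344, 6, 0xab) == 0x11ab3344);
        return 0;
}
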
8250
8251 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8252                              int buf_size)
8253 {
8254         int rc;
8255         u32 cmd_flags;
8256         u32 val;
8257         u32 written_so_far;
8258
8259         if (buf_size == 1)      /* ethtool */
8260                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8261
8262         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8263                 DP(BNX2X_MSG_NVM,
8264                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8265                    offset, buf_size);
8266                 return -EINVAL;
8267         }
8268
8269         if (offset + buf_size > bp->common.flash_size) {
8270                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8271                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8272                    offset, buf_size, bp->common.flash_size);
8273                 return -EINVAL;
8274         }
8275
8276         /* request access to nvram interface */
8277         rc = bnx2x_acquire_nvram_lock(bp);
8278         if (rc)
8279                 return rc;
8280
8281         /* enable access to nvram interface */
8282         bnx2x_enable_nvram_access(bp);
8283
8284         written_so_far = 0;
8285         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8286         while ((written_so_far < buf_size) && (rc == 0)) {
8287                 if (written_so_far == (buf_size - sizeof(u32)))
8288                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8289                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8290                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8291                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8292                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8293
8294                 memcpy(&val, data_buf, 4);
8295
8296                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8297
8298                 /* advance to the next dword */
8299                 offset += sizeof(u32);
8300                 data_buf += sizeof(u32);
8301                 written_so_far += sizeof(u32);
8302                 cmd_flags = 0;
8303         }
8304
8305         /* disable access to nvram interface */
8306         bnx2x_disable_nvram_access(bp);
8307         bnx2x_release_nvram_lock(bp);
8308
8309         return rc;
8310 }
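
The flag logic above marks a dword LAST when it is either the final dword of the buffer or the last dword before an NVRAM page boundary, and FIRST when it opens a new page; dwords in between carry no flags. A sketch of the same decisions, assuming a hypothetical 256-byte page in place of NVRAM_PAGE_SIZE:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 256     /* stand-in for NVRAM_PAGE_SIZE (assumption) */
#define F_FIRST 0x1
#define F_LAST  0x2

static unsigned int dword_flags(uint32_t off, uint32_t done, uint32_t total)
{
        unsigned int f = (done == 0) ? F_FIRST : 0;

        if (done == total - 4)
                f |= F_LAST;                    /* last dword of buffer */
        else if (((off + 4) % PAGE_SZ) == 0)
                f |= F_LAST;                    /* last dword of a page */
        else if ((off % PAGE_SZ) == 0)
                f |= F_FIRST;                   /* first dword of a page */
        return f;
}

int main(void)
{
        /* 16 bytes starting 8 bytes before a page boundary */
        for (uint32_t done = 0; done < 16; done += 4)
                printf("off 0x%x flags %u\n", (unsigned int)(248 + done),
                       dword_flags(248 + done, done, 16));
        return 0;
}
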
8311
8312 static int bnx2x_set_eeprom(struct net_device *dev,
8313                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8314 {
8315         struct bnx2x *bp = netdev_priv(dev);
8316         int rc;
8317
8318         if (!netif_running(dev))
8319                 return -EAGAIN;
8320
8321         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8322            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8323            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8324            eeprom->len, eeprom->len);
8325
8326         /* parameters already validated in ethtool_set_eeprom */
8327
8328         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8329         if (eeprom->magic == 0x00504859)
8330                 if (bp->port.pmf) {
8331
8332                         bnx2x_acquire_phy_lock(bp);
8333                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8334                                              bp->link_params.ext_phy_config,
8335                                              (bp->state != BNX2X_STATE_CLOSED),
8336                                              eebuf, eeprom->len);
8337                         if ((bp->state == BNX2X_STATE_OPEN) ||
8338                             (bp->state == BNX2X_STATE_DISABLED)) {
8339                                 rc |= bnx2x_link_reset(&bp->link_params,
8340                                                        &bp->link_vars);
8341                                 rc |= bnx2x_phy_init(&bp->link_params,
8342                                                      &bp->link_vars);
8343                         }
8344                         bnx2x_release_phy_lock(bp);
8345
8346                 } else /* Only the PMF can access the PHY */
8347                         return -EINVAL;
8348         else
8349                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8350
8351         return rc;
8352 }
8353
8354 static int bnx2x_get_coalesce(struct net_device *dev,
8355                               struct ethtool_coalesce *coal)
8356 {
8357         struct bnx2x *bp = netdev_priv(dev);
8358
8359         memset(coal, 0, sizeof(struct ethtool_coalesce));
8360
8361         coal->rx_coalesce_usecs = bp->rx_ticks;
8362         coal->tx_coalesce_usecs = bp->tx_ticks;
8363
8364         return 0;
8365 }
8366
8367 static int bnx2x_set_coalesce(struct net_device *dev,
8368                               struct ethtool_coalesce *coal)
8369 {
8370         struct bnx2x *bp = netdev_priv(dev);
8371
8372         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8373         if (bp->rx_ticks > 3000)
8374                 bp->rx_ticks = 3000;
8375
8376         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8377         if (bp->tx_ticks > 0x3000)
8378                 bp->tx_ticks = 0x3000;
8379
8380         if (netif_running(dev))
8381                 bnx2x_update_coalesce(bp);
8382
8383         return 0;
8384 }
8385
8386 static void bnx2x_get_ringparam(struct net_device *dev,
8387                                 struct ethtool_ringparam *ering)
8388 {
8389         struct bnx2x *bp = netdev_priv(dev);
8390
8391         ering->rx_max_pending = MAX_RX_AVAIL;
8392         ering->rx_mini_max_pending = 0;
8393         ering->rx_jumbo_max_pending = 0;
8394
8395         ering->rx_pending = bp->rx_ring_size;
8396         ering->rx_mini_pending = 0;
8397         ering->rx_jumbo_pending = 0;
8398
8399         ering->tx_max_pending = MAX_TX_AVAIL;
8400         ering->tx_pending = bp->tx_ring_size;
8401 }
8402
8403 static int bnx2x_set_ringparam(struct net_device *dev,
8404                                struct ethtool_ringparam *ering)
8405 {
8406         struct bnx2x *bp = netdev_priv(dev);
8407         int rc = 0;
8408
8409         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8410             (ering->tx_pending > MAX_TX_AVAIL) ||
8411             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8412                 return -EINVAL;
8413
8414         bp->rx_ring_size = ering->rx_pending;
8415         bp->tx_ring_size = ering->tx_pending;
8416
8417         if (netif_running(dev)) {
8418                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8419                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8420         }
8421
8422         return rc;
8423 }
8424
8425 static void bnx2x_get_pauseparam(struct net_device *dev,
8426                                  struct ethtool_pauseparam *epause)
8427 {
8428         struct bnx2x *bp = netdev_priv(dev);
8429
8430         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8431                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8432
8433         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8434                             BNX2X_FLOW_CTRL_RX);
8435         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8436                             BNX2X_FLOW_CTRL_TX);
8437
8438         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8439            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8440            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8441 }
8442
8443 static int bnx2x_set_pauseparam(struct net_device *dev,
8444                                 struct ethtool_pauseparam *epause)
8445 {
8446         struct bnx2x *bp = netdev_priv(dev);
8447
8448         if (IS_E1HMF(bp))
8449                 return 0;
8450
8451         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8452            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8453            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8454
8455         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8456
8457         if (epause->rx_pause)
8458                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8459
8460         if (epause->tx_pause)
8461                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8462
8463         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8464                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8465
8466         if (epause->autoneg) {
8467                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8468                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8469                         return -EINVAL;
8470                 }
8471
8472                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8473                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8474         }
8475
8476         DP(NETIF_MSG_LINK,
8477            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8478
8479         if (netif_running(dev)) {
8480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8481                 bnx2x_link_set(bp);
8482         }
8483
8484         return 0;
8485 }
8486
8487 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8488 {
8489         struct bnx2x *bp = netdev_priv(dev);
8490         int changed = 0;
8491         int rc = 0;
8492
8493         /* TPA requires Rx CSUM offloading */
8494         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8495                 if (!(dev->features & NETIF_F_LRO)) {
8496                         dev->features |= NETIF_F_LRO;
8497                         bp->flags |= TPA_ENABLE_FLAG;
8498                         changed = 1;
8499                 }
8500
8501         } else if (dev->features & NETIF_F_LRO) {
8502                 dev->features &= ~NETIF_F_LRO;
8503                 bp->flags &= ~TPA_ENABLE_FLAG;
8504                 changed = 1;
8505         }
8506
8507         if (changed && netif_running(dev)) {
8508                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8509                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8510         }
8511
8512         return rc;
8513 }
8514
8515 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8516 {
8517         struct bnx2x *bp = netdev_priv(dev);
8518
8519         return bp->rx_csum;
8520 }
8521
8522 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8523 {
8524         struct bnx2x *bp = netdev_priv(dev);
8525         int rc = 0;
8526
8527         bp->rx_csum = data;
8528
8529         /* Disable TPA when Rx CSUM is disabled; otherwise all
8530            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8531         if (!data) {
8532                 u32 flags = ethtool_op_get_flags(dev);
8533
8534                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8535         }
8536
8537         return rc;
8538 }
8539
8540 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8541 {
8542         if (data) {
8543                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8544                 dev->features |= NETIF_F_TSO6;
8545         } else {
8546                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8547                 dev->features &= ~NETIF_F_TSO6;
8548         }
8549
8550         return 0;
8551 }
8552
8553 static const struct {
8554         char string[ETH_GSTRING_LEN];
8555 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8556         { "register_test (offline)" },
8557         { "memory_test (offline)" },
8558         { "loopback_test (offline)" },
8559         { "nvram_test (online)" },
8560         { "interrupt_test (online)" },
8561         { "link_test (online)" },
8562         { "idle check (online)" },
8563         { "MC errors (online)" }
8564 };
8565
8566 static int bnx2x_self_test_count(struct net_device *dev)
8567 {
8568         return BNX2X_NUM_TESTS;
8569 }
8570
8571 static int bnx2x_test_registers(struct bnx2x *bp)
8572 {
8573         int idx, i, rc = -ENODEV;
8574         u32 wr_val = 0;
8575         int port = BP_PORT(bp);
8576         static const struct {
8577                 u32  offset0;
8578                 u32  offset1;
8579                 u32  mask;
8580         } reg_tbl[] = {
8581 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8582                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8583                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8584                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8585                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8586                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8587                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8588                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8589                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8590                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8591 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8592                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8593                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8594                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8595                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8596                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8597                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8598                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8599                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8600                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8601 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8602                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8603                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8604                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8605                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8606                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8607                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8608                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8609                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8610                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8611 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8612                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8613                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8614                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8615                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8616                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8617                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8618                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8619
8620                 { 0xffffffff, 0, 0x00000000 }
8621         };
8622
8623         if (!netif_running(bp->dev))
8624                 return rc;
8625
8626         /* Repeat the test twice:
8627            first by writing 0x00000000, then by writing 0xffffffff */
8628         for (idx = 0; idx < 2; idx++) {
8629
8630                 switch (idx) {
8631                 case 0:
8632                         wr_val = 0;
8633                         break;
8634                 case 1:
8635                         wr_val = 0xffffffff;
8636                         break;
8637                 }
8638
8639                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8640                         u32 offset, mask, save_val, val;
8641
8642                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8643                         mask = reg_tbl[i].mask;
8644
8645                         save_val = REG_RD(bp, offset);
8646
8647                         REG_WR(bp, offset, wr_val);
8648                         val = REG_RD(bp, offset);
8649
8650                         /* Restore the original register's value */
8651                         REG_WR(bp, offset, save_val);
8652
8653                         /* verify the value is as expected */
8654                         if ((val & mask) != (wr_val & mask))
8655                                 goto test_reg_exit;
8656                 }
8657         }
8658
8659         rc = 0;
8660
8661 test_reg_exit:
8662         return rc;
8663 }
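
The test pattern generalizes: for each table entry, save the register, write all-zeros and then all-ones, read back, restore, and compare under the mask of writable bits. A toy version over an in-memory register file (regs[] stands in for REG_RD/REG_WR):

#include <stdbool.h>
#include <stdint.h>

static uint32_t regs[4];        /* toy register file */

struct reg_test { unsigned int idx; uint32_t mask; };

static bool walk_test(const struct reg_test *tbl, int n)
{
        static const uint32_t patterns[] = { 0x00000000, 0xffffffff };

        for (int p = 0; p < 2; p++) {
                for (int i = 0; i < n; i++) {
                        uint32_t save = regs[tbl[i].idx];

                        regs[tbl[i].idx] = patterns[p];
                        uint32_t val = regs[tbl[i].idx];
                        regs[tbl[i].idx] = save;        /* restore */
                        if ((val & tbl[i].mask) !=
                            (patterns[p] & tbl[i].mask))
                                return false;   /* stuck bit detected */
                }
        }
        return true;
}

int main(void)
{
        static const struct reg_test tbl[] = { { 0, 0x000000ff },
                                               { 1, 0xffffffff } };
        return walk_test(tbl, 2) ? 0 : 1;
}
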
8664
8665 static int bnx2x_test_memory(struct bnx2x *bp)
8666 {
8667         int i, j, rc = -ENODEV;
8668         u32 val;
8669         static const struct {
8670                 u32 offset;
8671                 int size;
8672         } mem_tbl[] = {
8673                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8674                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8675                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8676                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8677                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8678                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8679                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8680
8681                 { 0xffffffff, 0 }
8682         };
8683         static const struct {
8684                 char *name;
8685                 u32 offset;
8686                 u32 e1_mask;
8687                 u32 e1h_mask;
8688         } prty_tbl[] = {
8689                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8690                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8691                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8692                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8693                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8694                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8695
8696                 { NULL, 0xffffffff, 0, 0 }
8697         };
8698
8699         if (!netif_running(bp->dev))
8700                 return rc;
8701
8702         /* Go through all the memories */
8703         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8704                 for (j = 0; j < mem_tbl[i].size; j++)
8705                         REG_RD(bp, mem_tbl[i].offset + j*4);
8706
8707         /* Check the parity status */
8708         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8709                 val = REG_RD(bp, prty_tbl[i].offset);
8710                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8711                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8712                         DP(NETIF_MSG_HW,
8713                            "%s is 0x%x\n", prty_tbl[i].name, val);
8714                         goto test_mem_exit;
8715                 }
8716         }
8717
8718         rc = 0;
8719
8720 test_mem_exit:
8721         return rc;
8722 }
8723
8724 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8725 {
8726         int cnt = 1000;
8727
8728         if (link_up)
8729                 while (bnx2x_link_test(bp) && cnt--)
8730                         msleep(10);
8731 }
8732
8733 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8734 {
8735         unsigned int pkt_size, num_pkts, i;
8736         struct sk_buff *skb;
8737         unsigned char *packet;
8738         struct bnx2x_fastpath *fp = &bp->fp[0];
8739         u16 tx_start_idx, tx_idx;
8740         u16 rx_start_idx, rx_idx;
8741         u16 pkt_prod;
8742         struct sw_tx_bd *tx_buf;
8743         struct eth_tx_bd *tx_bd;
8744         dma_addr_t mapping;
8745         union eth_rx_cqe *cqe;
8746         u8 cqe_fp_flags;
8747         struct sw_rx_bd *rx_buf;
8748         u16 len;
8749         int rc = -ENODEV;
8750
8751         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8752                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8753                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8754
8755         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8756                 u16 cnt = 1000;
8757                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8758                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8759                 /* wait until link state is restored */
8760                 if (link_up)
8761                         while (cnt-- && bnx2x_test_link(&bp->link_params,
8762                                                         &bp->link_vars))
8763                                 msleep(10);
8764         } else
8765                 return -EINVAL;
8766
8767         pkt_size = 1514;
8768         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8769         if (!skb) {
8770                 rc = -ENOMEM;
8771                 goto test_loopback_exit;
8772         }
8773         packet = skb_put(skb, pkt_size);
8774         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8775         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8776         for (i = ETH_HLEN; i < pkt_size; i++)
8777                 packet[i] = (unsigned char) (i & 0xff);
8778
8779         num_pkts = 0;
8780         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8781         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8782
8783         pkt_prod = fp->tx_pkt_prod++;
8784         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8785         tx_buf->first_bd = fp->tx_bd_prod;
8786         tx_buf->skb = skb;
8787
8788         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8789         mapping = pci_map_single(bp->pdev, skb->data,
8790                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8791         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8792         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8793         tx_bd->nbd = cpu_to_le16(1);
8794         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8795         tx_bd->vlan = cpu_to_le16(pkt_prod);
8796         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8797                                        ETH_TX_BD_FLAGS_END_BD);
8798         tx_bd->general_data = ((UNICAST_ADDRESS <<
8799                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8800
8801         wmb();
8802
8803         fp->hw_tx_prods->bds_prod =
8804                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8805         mb(); /* FW restriction: must not reorder writing nbd and packets */
8806         fp->hw_tx_prods->packets_prod =
8807                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8808         DOORBELL(bp, FP_IDX(fp), 0);
8809
8810         mmiowb();
8811
8812         num_pkts++;
8813         fp->tx_bd_prod++;
8814         bp->dev->trans_start = jiffies;
8815
8816         udelay(100);
8817
8818         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8819         if (tx_idx != tx_start_idx + num_pkts)
8820                 goto test_loopback_exit;
8821
8822         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8823         if (rx_idx != rx_start_idx + num_pkts)
8824                 goto test_loopback_exit;
8825
8826         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8827         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8828         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8829                 goto test_loopback_rx_exit;
8830
8831         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8832         if (len != pkt_size)
8833                 goto test_loopback_rx_exit;
8834
8835         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8836         skb = rx_buf->skb;
8837         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8838         for (i = ETH_HLEN; i < pkt_size; i++)
8839                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8840                         goto test_loopback_rx_exit;
8841
8842         rc = 0;
8843
8844 test_loopback_rx_exit:
8845
8846         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8847         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8848         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8849         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8850
8851         /* Update producers */
8852         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8853                              fp->rx_sge_prod);
8854
8855 test_loopback_exit:
8856         bp->link_params.loopback_mode = LOOPBACK_NONE;
8857
8858         return rc;
8859 }
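
The loopback frame is built to be trivially verifiable: destination MAC equal to the device's own address, the remainder of the Ethernet header zeroed, and each payload byte equal to the low eight bits of its offset. The construction and check in isolation:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14
#define PKT_SIZE 1514

static void build_test_pkt(uint8_t *pkt, const uint8_t *mac)
{
        memcpy(pkt, mac, ETH_ALEN);                  /* dest = own MAC */
        memset(pkt + ETH_ALEN, 0, ETH_HLEN - ETH_ALEN);
        for (int i = ETH_HLEN; i < PKT_SIZE; i++)
                pkt[i] = (uint8_t)i;                 /* i & 0xff */
}

static bool check_test_pkt(const uint8_t *pkt)
{
        for (int i = ETH_HLEN; i < PKT_SIZE; i++)
                if (pkt[i] != (uint8_t)i)
                        return false;                /* payload corrupted */
        return true;
}
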
8860
8861 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8862 {
8863         int rc = 0;
8864
8865         if (!netif_running(bp->dev))
8866                 return BNX2X_LOOPBACK_FAILED;
8867
8868         bnx2x_netif_stop(bp, 1);
8869         bnx2x_acquire_phy_lock(bp);
8870
8871         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8872                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8873                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8874         }
8875
8876         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8877                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8878                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8879         }
8880
8881         bnx2x_release_phy_lock(bp);
8882         bnx2x_netif_start(bp);
8883
8884         return rc;
8885 }
8886
8887 #define CRC32_RESIDUAL                  0xdebb20e3
8888
8889 static int bnx2x_test_nvram(struct bnx2x *bp)
8890 {
8891         static const struct {
8892                 int offset;
8893                 int size;
8894         } nvram_tbl[] = {
8895                 {     0,  0x14 }, /* bootstrap */
8896                 {  0x14,  0xec }, /* dir */
8897                 { 0x100, 0x350 }, /* manuf_info */
8898                 { 0x450,  0xf0 }, /* feature_info */
8899                 { 0x640,  0x64 }, /* upgrade_key_info */
8900                 { 0x6a4,  0x64 },
8901                 { 0x708,  0x70 }, /* manuf_key_info */
8902                 { 0x778,  0x70 },
8903                 {     0,     0 }
8904         };
8905         u32 buf[0x350 / 4];
8906         u8 *data = (u8 *)buf;
8907         int i, rc;
8908         u32 magic, csum;
8909
8910         rc = bnx2x_nvram_read(bp, 0, data, 4);
8911         if (rc) {
8912                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8913                 goto test_nvram_exit;
8914         }
8915
8916         magic = be32_to_cpu(buf[0]);
8917         if (magic != 0x669955aa) {
8918                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8919                 rc = -ENODEV;
8920                 goto test_nvram_exit;
8921         }
8922
8923         for (i = 0; nvram_tbl[i].size; i++) {
8924
8925                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8926                                       nvram_tbl[i].size);
8927                 if (rc) {
8928                         DP(NETIF_MSG_PROBE,
8929                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8930                         goto test_nvram_exit;
8931                 }
8932
8933                 csum = ether_crc_le(nvram_tbl[i].size, data);
8934                 if (csum != CRC32_RESIDUAL) {
8935                         DP(NETIF_MSG_PROBE,
8936                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8937                         rc = -ENODEV;
8938                         goto test_nvram_exit;
8939                 }
8940         }
8941
8942 test_nvram_exit:
8943         return rc;
8944 }
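
Each NVRAM section stores its CRC32 in its last four bytes, so re-running the CRC over data-plus-stored-CRC yields a constant residual rather than requiring an explicit compare. ether_crc_le() omits the final XOR, hence the residual 0xdebb20e3; zlib's crc32(), which applies the final XOR, yields the complementary constant 0x2144df1c. A user-space sketch of the property (build with -lz; note the little-endian assumption in the memcpy):

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
        uint8_t blk[16] = "0123456789ab";       /* 12 data bytes + CRC */
        uint32_t crc = (uint32_t)crc32(0, blk, 12);

        /* append the CRC little-endian, as the NVRAM layout does
         * (assumes a little-endian host for this quick check) */
        memcpy(blk + 12, &crc, 4);

        /* residue with final XOR applied; ~0x2144df1c == 0xdebb20e3 */
        assert(crc32(0, blk, 16) == 0x2144df1c);
        return 0;
}
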
8945
8946 static int bnx2x_test_intr(struct bnx2x *bp)
8947 {
8948         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8949         int i, rc;
8950
8951         if (!netif_running(bp->dev))
8952                 return -ENODEV;
8953
8954         config->hdr.length_6b = 0;
8955         if (CHIP_IS_E1(bp))
8956                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8957         else
8958                 config->hdr.offset = BP_FUNC(bp);
8959         config->hdr.client_id = BP_CL_ID(bp);
8960         config->hdr.reserved1 = 0;
8961
8962         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8963                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8964                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8965         if (rc == 0) {
8966                 bp->set_mac_pending++;
8967                 for (i = 0; i < 10; i++) {
8968                         if (!bp->set_mac_pending)
8969                                 break;
8970                         msleep_interruptible(10);
8971                 }
8972                 if (i == 10)
8973                         rc = -ENODEV;
8974         }
8975
8976         return rc;
8977 }
8978
8979 static void bnx2x_self_test(struct net_device *dev,
8980                             struct ethtool_test *etest, u64 *buf)
8981 {
8982         struct bnx2x *bp = netdev_priv(dev);
8983
8984         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8985
8986         if (!netif_running(dev))
8987                 return;
8988
8989         /* offline tests are not supported in MF mode */
8990         if (IS_E1HMF(bp))
8991                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8992
8993         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8994                 u8 link_up;
8995
8996                 link_up = bp->link_vars.link_up;
8997                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8998                 bnx2x_nic_load(bp, LOAD_DIAG);
8999                 /* wait until link state is restored */
9000                 bnx2x_wait_for_link(bp, link_up);
9001
9002                 if (bnx2x_test_registers(bp) != 0) {
9003                         buf[0] = 1;
9004                         etest->flags |= ETH_TEST_FL_FAILED;
9005                 }
9006                 if (bnx2x_test_memory(bp) != 0) {
9007                         buf[1] = 1;
9008                         etest->flags |= ETH_TEST_FL_FAILED;
9009                 }
9010                 buf[2] = bnx2x_test_loopback(bp, link_up);
9011                 if (buf[2] != 0)
9012                         etest->flags |= ETH_TEST_FL_FAILED;
9013
9014                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9015                 bnx2x_nic_load(bp, LOAD_NORMAL);
9016                 /* wait until link state is restored */
9017                 bnx2x_wait_for_link(bp, link_up);
9018         }
9019         if (bnx2x_test_nvram(bp) != 0) {
9020                 buf[3] = 1;
9021                 etest->flags |= ETH_TEST_FL_FAILED;
9022         }
9023         if (bnx2x_test_intr(bp) != 0) {
9024                 buf[4] = 1;
9025                 etest->flags |= ETH_TEST_FL_FAILED;
9026         }
9027         if (bp->port.pmf)
9028                 if (bnx2x_link_test(bp) != 0) {
9029                         buf[5] = 1;
9030                         etest->flags |= ETH_TEST_FL_FAILED;
9031                 }
9032         buf[7] = bnx2x_mc_assert(bp);
9033         if (buf[7] != 0)
9034                 etest->flags |= ETH_TEST_FL_FAILED;
9035
9036 #ifdef BNX2X_EXTRA_DEBUG
9037         bnx2x_panic_dump(bp);
9038 #endif
9039 }
9040
9041 static const struct {
9042         long offset;
9043         int size;
9044         u32 flags;
9045 #define STATS_FLAGS_PORT                1
9046 #define STATS_FLAGS_FUNC                2
9047         u8 string[ETH_GSTRING_LEN];
9048 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9049 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9050                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
9051         { STATS_OFFSET32(error_bytes_received_hi),
9052                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9053         { STATS_OFFSET32(total_bytes_transmitted_hi),
9054                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
9055         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9056                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9057         { STATS_OFFSET32(total_unicast_packets_received_hi),
9058                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9059         { STATS_OFFSET32(total_multicast_packets_received_hi),
9060                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9061         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9062                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9063         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9064                                 8, STATS_FLAGS_FUNC, "tx_packets" },
9065         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9066                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9067 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9068                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9069         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9070                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9071         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9072                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9073         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9074                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9075         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9076                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9077         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9078                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9079         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9080                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9081         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9082                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9083         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9084                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9085         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9086                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9087 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9088                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9089         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9090                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9091         { STATS_OFFSET32(jabber_packets_received),
9092                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9093         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9094                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9095         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9096                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9097         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9098                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9099         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9100                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9101         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9102                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9103         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9104                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9105         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9106                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9107 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9108                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9109         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9110                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9111         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9112                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9113         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9114                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9115         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9116                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9117         { STATS_OFFSET32(mac_filter_discard),
9118                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9119         { STATS_OFFSET32(no_buff_discard),
9120                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9121         { STATS_OFFSET32(xxoverflow_discard),
9122                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9123         { STATS_OFFSET32(brb_drop_hi),
9124                                 8, STATS_FLAGS_PORT, "brb_discard" },
9125         { STATS_OFFSET32(brb_truncate_hi),
9126                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9127 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9128                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9129         { STATS_OFFSET32(rx_skb_alloc_failed),
9130                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9131 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9132                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9133 };
9134
9135 #define IS_NOT_E1HMF_STAT(bp, i) \
9136                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9137
9138 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9139 {
9140         struct bnx2x *bp = netdev_priv(dev);
9141         int i, j;
9142
9143         switch (stringset) {
9144         case ETH_SS_STATS:
9145                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9146                         if (IS_NOT_E1HMF_STAT(bp, i))
9147                                 continue;
9148                         strcpy(buf + j*ETH_GSTRING_LEN,
9149                                bnx2x_stats_arr[i].string);
9150                         j++;
9151                 }
9152                 break;
9153
9154         case ETH_SS_TEST:
9155                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9156                 break;
9157         }
9158 }
9159
9160 static int bnx2x_get_stats_count(struct net_device *dev)
9161 {
9162         struct bnx2x *bp = netdev_priv(dev);
9163         int i, num_stats = 0;
9164
9165         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9166                 if (IS_NOT_E1HMF_STAT(bp, i))
9167                         continue;
9168                 num_stats++;
9169         }
9170         return num_stats;
9171 }
9172
9173 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9174                                     struct ethtool_stats *stats, u64 *buf)
9175 {
9176         struct bnx2x *bp = netdev_priv(dev);
9177         u32 *hw_stats = (u32 *)&bp->eth_stats;
9178         int i, j;
9179
9180         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9181                 if (IS_NOT_E1HMF_STAT(bp, i))
9182                         continue;
9183
9184                 if (bnx2x_stats_arr[i].size == 0) {
9185                         /* skip this counter */
9186                         buf[j] = 0;
9187                         j++;
9188                         continue;
9189                 }
9190                 if (bnx2x_stats_arr[i].size == 4) {
9191                         /* 4-byte counter */
9192                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9193                         j++;
9194                         continue;
9195                 }
9196                 /* 8-byte counter */
9197                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9198                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9199                 j++;
9200         }
9201 }
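
The adapter exposes 64-bit counters as two consecutive 32-bit words, high word first; HILO_U64() merely shifts and ORs them back together. Equivalent arithmetic:

#include <assert.h>
#include <stdint.h>

static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;       /* ~ HILO_U64() */
}

int main(void)
{
        assert(hilo_u64(0x00000001, 0x00000002) == 0x0000000100000002ull);
        return 0;
}
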
9202
9203 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9204 {
9205         struct bnx2x *bp = netdev_priv(dev);
9206         int port = BP_PORT(bp);
9207         int i;
9208
9209         if (!netif_running(dev))
9210                 return 0;
9211
9212         if (!bp->port.pmf)
9213                 return 0;
9214
9215         if (data == 0)
9216                 data = 2;
9217
9218         for (i = 0; i < (data * 2); i++) {
9219                 if ((i % 2) == 0)
9220                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9221                                       bp->link_params.hw_led_mode,
9222                                       bp->link_params.chip_id);
9223                 else
9224                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9225                                       bp->link_params.hw_led_mode,
9226                                       bp->link_params.chip_id);
9227
9228                 msleep_interruptible(500);
9229                 if (signal_pending(current))
9230                         break;
9231         }
9232
9233         if (bp->link_vars.link_up)
9234                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9235                               bp->link_vars.line_speed,
9236                               bp->link_params.hw_led_mode,
9237                               bp->link_params.chip_id);
9238
9239         return 0;
9240 }
9241
9242 static struct ethtool_ops bnx2x_ethtool_ops = {
9243         .get_settings           = bnx2x_get_settings,
9244         .set_settings           = bnx2x_set_settings,
9245         .get_drvinfo            = bnx2x_get_drvinfo,
9246         .get_wol                = bnx2x_get_wol,
9247         .set_wol                = bnx2x_set_wol,
9248         .get_msglevel           = bnx2x_get_msglevel,
9249         .set_msglevel           = bnx2x_set_msglevel,
9250         .nway_reset             = bnx2x_nway_reset,
9251         .get_link               = ethtool_op_get_link,
9252         .get_eeprom_len         = bnx2x_get_eeprom_len,
9253         .get_eeprom             = bnx2x_get_eeprom,
9254         .set_eeprom             = bnx2x_set_eeprom,
9255         .get_coalesce           = bnx2x_get_coalesce,
9256         .set_coalesce           = bnx2x_set_coalesce,
9257         .get_ringparam          = bnx2x_get_ringparam,
9258         .set_ringparam          = bnx2x_set_ringparam,
9259         .get_pauseparam         = bnx2x_get_pauseparam,
9260         .set_pauseparam         = bnx2x_set_pauseparam,
9261         .get_rx_csum            = bnx2x_get_rx_csum,
9262         .set_rx_csum            = bnx2x_set_rx_csum,
9263         .get_tx_csum            = ethtool_op_get_tx_csum,
9264         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9265         .set_flags              = bnx2x_set_flags,
9266         .get_flags              = ethtool_op_get_flags,
9267         .get_sg                 = ethtool_op_get_sg,
9268         .set_sg                 = ethtool_op_set_sg,
9269         .get_tso                = ethtool_op_get_tso,
9270         .set_tso                = bnx2x_set_tso,
9271         .self_test_count        = bnx2x_self_test_count,
9272         .self_test              = bnx2x_self_test,
9273         .get_strings            = bnx2x_get_strings,
9274         .phys_id                = bnx2x_phys_id,
9275         .get_stats_count        = bnx2x_get_stats_count,
9276         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9277 };
9278
9279 /* end of ethtool_ops */
9280
9281 /****************************************************************************
9282 * General service functions
9283 ****************************************************************************/
9284
9285 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9286 {
9287         u16 pmcsr;
9288
9289         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9290
9291         switch (state) {
9292         case PCI_D0:
9293                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9294                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9295                                        PCI_PM_CTRL_PME_STATUS));
9296
9297                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9298                         /* delay required during transition out of D3hot */
9299                         msleep(20);
9300                 break;
9301
9302         case PCI_D3hot:
9303                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9304                 pmcsr |= 3;     /* D3hot */
9305
9306                 if (bp->wol)
9307                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9308
9309                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9310                                       pmcsr);
9311
9312                 /* No more memory access after this point until
9313                  * the device is brought back to D0.
9314                  */
9315                 break;
9316
9317         default:
9318                 return -EINVAL;
9319         }
9320         return 0;
9321 }
9322
9323 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9324 {
9325         u16 rx_cons_sb;
9326
9327         /* Tell compiler that status block fields can change */
9328         barrier();
9329         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9330         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9331                 rx_cons_sb++;
9332         return (fp->rx_comp_cons != rx_cons_sb);
9333 }
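
The increment past MAX_RCQ_DESC_CNT accounts for ring paging: the last slot of each RCQ page holds a link to the next page rather than a completion, so a consumer index that lands exactly there must be advanced before the comparison. A toy model, with a hypothetical 128-slot page standing in for the real ring geometry:

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK_TOY 127       /* stand-in for MAX_RCQ_DESC_CNT */

/* advance an index past the per-page "next page" link descriptor */
static uint16_t skip_page_link(uint16_t idx)
{
        if ((idx & PAGE_MASK_TOY) == PAGE_MASK_TOY)
                idx++;
        return idx;
}

int main(void)
{
        assert(skip_page_link(126) == 126);     /* normal slot */
        assert(skip_page_link(127) == 128);     /* link slot skipped */
        return 0;
}
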
9334
9335 /*
9336  * net_device service functions
9337  */
9338
9339 static int bnx2x_poll(struct napi_struct *napi, int budget)
9340 {
9341         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9342                                                  napi);
9343         struct bnx2x *bp = fp->bp;
9344         int work_done = 0;
9345
9346 #ifdef BNX2X_STOP_ON_ERROR
9347         if (unlikely(bp->panic))
9348                 goto poll_panic;
9349 #endif
9350
9351         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9352         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9353         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9354
9355         bnx2x_update_fpsb_idx(fp);
9356
9357         if (bnx2x_has_tx_work(fp))
9358                 bnx2x_tx_int(fp, budget);
9359
9360         if (bnx2x_has_rx_work(fp))
9361                 work_done = bnx2x_rx_int(fp, budget);
9362         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9363
9364         /* must not complete if we consumed full budget */
9365         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9366
9367 #ifdef BNX2X_STOP_ON_ERROR
9368 poll_panic:
9369 #endif
9370                 netif_rx_complete(napi);
9371
9372                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9373                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9374                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9375                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9376         }
9377         return work_done;
9378 }
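
This honors the NAPI contract: consume at most 'budget' packets, and only complete (re-enabling interrupts) when less than the full budget was used and no work remains. A stripped-down model of the control flow; has_work(), process_one() and irq_enable() are hypothetical stand-ins:

#include <stdbool.h>

extern bool has_work(void);
extern void process_one(void);
extern void irq_enable(void);

/* Returns how much of the budget was used. */
static int poll_model(int budget)
{
        int done = 0;

        while (done < budget && has_work()) {
                process_one();
                done++;
        }
        /* must NOT complete if the full budget was consumed */
        if (done < budget && !has_work())
                irq_enable();   /* ~ netif_rx_complete + IGU ack */
        return done;
}
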
9379
9380
9381 /* We split the first BD into header and data BDs
9382  * to ease the pain of our fellow microcode engineers;
9383  * we use one mapping for both BDs.
9384  * So far this has only been observed to happen
9385  * in Other Operating Systems(TM).
9386  */
9387 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9388                                    struct bnx2x_fastpath *fp,
9389                                    struct eth_tx_bd **tx_bd, u16 hlen,
9390                                    u16 bd_prod, int nbd)
9391 {
9392         struct eth_tx_bd *h_tx_bd = *tx_bd;
9393         struct eth_tx_bd *d_tx_bd;
9394         dma_addr_t mapping;
9395         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9396
9397         /* first, fix the first BD */
9398         h_tx_bd->nbd = cpu_to_le16(nbd);
9399         h_tx_bd->nbytes = cpu_to_le16(hlen);
9400
9401         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9402            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9403            h_tx_bd->addr_lo, h_tx_bd->nbd);
9404
9405         /* now get a new data BD
9406          * (after the pbd) and fill it */
9407         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9408         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9409
9410         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9411                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9412
9413         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9414         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9415         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9416         d_tx_bd->vlan = 0;
9417         /* this marks the BD as one that has no individual mapping;
9418          * the FW ignores this flag in a BD not marked start
9419          */
9420         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9421         DP(NETIF_MSG_TX_QUEUED,
9422            "TSO split data size is %d (%x:%x)\n",
9423            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9424
9425         /* advance the tx_bd pointer so the caller marks the last BD */
9426         *tx_bd = d_tx_bd;
9427
9428         return bd_prod;
9429 }
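/* A worked example of the split above, with illustrative values only:
 * for a TSO skb whose linear part carries 66 bytes of headers followed by
 * 128 bytes of payload (old_len = 194), the start BD is trimmed to
 * nbytes = 66 and a new data BD is chained in with nbytes = 194 - 66 = 128
 * at DMA address mapping + 66.  Both BDs share the one pci_map_single()
 * mapping of the linear data, which is why the data BD is flagged as
 * having no individual mapping.
 */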
9430
9431 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9432 {
9433         if (fix > 0)
9434                 csum = (u16) ~csum_fold(csum_sub(csum,
9435                                 csum_partial(t_header - fix, fix, 0)));
9436
9437         else if (fix < 0)
9438                 csum = (u16) ~csum_fold(csum_add(csum,
9439                                 csum_partial(t_header, -fix, 0)));
9440
9441         return swab16(csum);
9442 }
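/* The fixup above compensates for the stack having started its checksum
 * somewhere other than the transport header: for fix > 0 the partial sum
 * of the extra leading bytes is subtracted, for fix < 0 the missing bytes
 * are added, and the result is folded, inverted and byte-swapped for the
 * parsing BD.  A sketch of the call as made from bnx2x_start_xmit():
 *
 *      s8 fix = SKB_CS_OFF(skb);       // e.g. +8: csum began 8 bytes early
 *      pbd->tcp_pseudo_csum =
 *              bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb), fix);
 */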
9443
9444 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9445 {
9446         u32 rc;
9447
9448         if (skb->ip_summed != CHECKSUM_PARTIAL)
9449                 rc = XMIT_PLAIN;
9450
9451         else {
9452                 if (skb->protocol == htons(ETH_P_IPV6)) {
9453                         rc = XMIT_CSUM_V6;
9454                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9455                                 rc |= XMIT_CSUM_TCP;
9456
9457                 } else {
9458                         rc = XMIT_CSUM_V4;
9459                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9460                                 rc |= XMIT_CSUM_TCP;
9461                 }
9462         }
9463
9464         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9465                 rc |= XMIT_GSO_V4;
9466
9467         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9468                 rc |= XMIT_GSO_V6;
9469
9470         return rc;
9471 }
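/* For example, a CHECKSUM_PARTIAL TCPv4 skb with SKB_GSO_TCPV4 set maps to
 * xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a frame
 * needing no offloads maps to XMIT_PLAIN - which is why a bare
 * "if (xmit_type)" in bnx2x_start_xmit() is enough to decide whether a
 * parsing BD is needed at all.
 */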
9472
9473 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9474 /* check if packet requires linearization (packet is too fragmented) */
9475 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9476                              u32 xmit_type)
9477 {
9478         int to_copy = 0;
9479         int hlen = 0;
9480         int first_bd_sz = 0;
9481
9482         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9483         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9484
9485                 if (xmit_type & XMIT_GSO) {
9486                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9487                         /* Check if LSO packet needs to be copied:
9488                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9489                         int wnd_size = MAX_FETCH_BD - 3;
9490                         /* Number of windows to check */
9491                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9492                         int wnd_idx = 0;
9493                         int frag_idx = 0;
9494                         u32 wnd_sum = 0;
9495
9496                         /* Headers length */
9497                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9498                                 tcp_hdrlen(skb);
9499
9500                         /* Amount of data (w/o headers) in the linear part of the SKB */
9501                         first_bd_sz = skb_headlen(skb) - hlen;
9502
9503                         wnd_sum  = first_bd_sz;
9504
9505                         /* Calculate the first sum - it's special */
9506                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9507                                 wnd_sum +=
9508                                         skb_shinfo(skb)->frags[frag_idx].size;
9509
9510                         /* If there is data in the linear part of the skb, check it */
9511                         if (first_bd_sz > 0) {
9512                                 if (unlikely(wnd_sum < lso_mss)) {
9513                                         to_copy = 1;
9514                                         goto exit_lbl;
9515                                 }
9516
9517                                 wnd_sum -= first_bd_sz;
9518                         }
9519
9520                         /* Others are easier: run through the frag list and
9521                            check all windows */
9522                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9523                                 wnd_sum +=
9524                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9525
9526                                 if (unlikely(wnd_sum < lso_mss)) {
9527                                         to_copy = 1;
9528                                         break;
9529                                 }
9530                                 wnd_sum -=
9531                                         skb_shinfo(skb)->frags[wnd_idx].size;
9532                         }
9533
9534                 } else {
9535                         /* a non-LSO packet that is too fragmented must
9536                            always be linearized */
9537                         to_copy = 1;
9538                 }
9539         }
9540
9541 exit_lbl:
9542         if (unlikely(to_copy))
9543                 DP(NETIF_MSG_TX_QUEUED,
9544                    "Linearization IS REQUIRED for %s packet. "
9545                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9546                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9547                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9548
9549         return to_copy;
9550 }
9551 #endif
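/* To make the window check above concrete, assume MAX_FETCH_BD = 13 (the
 * actual value lives in bnx2x.h): wnd_size = 10, so the linear remainder
 * plus the first nine frags, and then every later run of ten consecutive
 * frags, must each carry at least lso_mss bytes.  If any such window falls
 * short, the FW could need more than MAX_FETCH_BD BDs to fetch a single
 * MSS, so the skb is linearized instead.
 */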
9552
9553 /* called with netif_tx_lock
9554  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9555  * netif_wake_queue()
9556  */
9557 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9558 {
9559         struct bnx2x *bp = netdev_priv(dev);
9560         struct bnx2x_fastpath *fp;
9561         struct sw_tx_bd *tx_buf;
9562         struct eth_tx_bd *tx_bd;
9563         struct eth_tx_parse_bd *pbd = NULL;
9564         u16 pkt_prod, bd_prod;
9565         int nbd, fp_index;
9566         dma_addr_t mapping;
9567         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9568         int vlan_off = (bp->e1hov ? 4 : 0);
9569         int i;
9570         u8 hlen = 0;
9571
9572 #ifdef BNX2X_STOP_ON_ERROR
9573         if (unlikely(bp->panic))
9574                 return NETDEV_TX_BUSY;
9575 #endif
9576
9577         fp_index = (smp_processor_id() % bp->num_queues);
9578         fp = &bp->fp[fp_index];
9579
9580         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9581                 bp->eth_stats.driver_xoff++;
9582                 netif_stop_queue(dev);
9583                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9584                 return NETDEV_TX_BUSY;
9585         }
9586
9587         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9588            "  gso type %x  xmit_type %x\n",
9589            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9590            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9591
9592 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9593         /* First, check if we need to linearize the skb
9594            (due to FW restrictions) */
9595         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9596                 /* Statistics of linearization */
9597                 bp->lin_cnt++;
9598                 if (skb_linearize(skb) != 0) {
9599                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9600                            "silently dropping this SKB\n");
9601                         dev_kfree_skb_any(skb);
9602                         return NETDEV_TX_OK;
9603                 }
9604         }
9605 #endif
9606
9607         /*
9608          * Please read carefully. First we use one BD which we mark as the
9609          * start BD, then for TSO or checksum offload we have a parsing
9610          * info BD, and only then the rest of the TSO BDs.
9611          * (don't forget to mark the last one as last,
9612          * and to unmap only AFTER you write to the BD ...)
9613          * And above all, all PBD sizes are in words - NOT DWORDS!
9614          */
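        /* So a TSO packet lands on the ring roughly as:
         *
         *   [start BD: headers] [parse BD] [data BD] ... [data BD: END flag]
         *
         * with nbd counting the start BD, the PBD when present and all data
         * BDs (plus one more if a "next page" BD is crossed, see below).
         */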
9615
9616         pkt_prod = fp->tx_pkt_prod++;
9617         bd_prod = TX_BD(fp->tx_bd_prod);
9618
9619         /* get a tx_buf and first BD */
9620         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9621         tx_bd = &fp->tx_desc_ring[bd_prod];
9622
9623         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9624         tx_bd->general_data = (UNICAST_ADDRESS <<
9625                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9626         /* header nbd */
9627         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9628
9629         /* remember the first BD of the packet */
9630         tx_buf->first_bd = fp->tx_bd_prod;
9631         tx_buf->skb = skb;
9632
9633         DP(NETIF_MSG_TX_QUEUED,
9634            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9635            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9636
9637 #ifdef BCM_VLAN
9638         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9639             (bp->flags & HW_VLAN_TX_FLAG)) {
9640                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9641                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9642                 vlan_off += 4;
9643         } else
9644 #endif
9645                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9646
9647         if (xmit_type) {
9648                 /* turn on parsing and get a BD */
9649                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9650                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9651
9652                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9653         }
9654
9655         if (xmit_type & XMIT_CSUM) {
9656                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9657
9658                 /* for now NS flag is not used in Linux */
9659                 pbd->global_data = (hlen |
9660                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9661                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9662
9663                 pbd->ip_hlen = (skb_transport_header(skb) -
9664                                 skb_network_header(skb)) / 2;
9665
9666                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9667
9668                 pbd->total_hlen = cpu_to_le16(hlen);
9669                 hlen = hlen*2 - vlan_off;
9670
9671                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9672
9673                 if (xmit_type & XMIT_CSUM_V4)
9674                         tx_bd->bd_flags.as_bitfield |=
9675                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9676                 else
9677                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9678
9679                 if (xmit_type & XMIT_CSUM_TCP) {
9680                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9681
9682                 } else {
9683                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9684
9685                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9686                         pbd->cs_offset = fix / 2;
9687
9688                         DP(NETIF_MSG_TX_QUEUED,
9689                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9690                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9691                            SKB_CS(skb));
9692
9693                         /* HW bug: fixup the CSUM */
9694                         pbd->tcp_pseudo_csum =
9695                                 bnx2x_csum_fix(skb_transport_header(skb),
9696                                                SKB_CS(skb), fix);
9697
9698                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9699                            pbd->tcp_pseudo_csum);
9700                 }
9701         }
9702
9703         mapping = pci_map_single(bp->pdev, skb->data,
9704                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9705
9706         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9707         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9708         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9709         tx_bd->nbd = cpu_to_le16(nbd);
9710         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9711
9712         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9713            "  nbytes %d  flags %x  vlan %x\n",
9714            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9715            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9716            le16_to_cpu(tx_bd->vlan));
9717
9718         if (xmit_type & XMIT_GSO) {
9719
9720                 DP(NETIF_MSG_TX_QUEUED,
9721                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9722                    skb->len, hlen, skb_headlen(skb),
9723                    skb_shinfo(skb)->gso_size);
9724
9725                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9726
9727                 if (unlikely(skb_headlen(skb) > hlen))
9728                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9729                                                  bd_prod, ++nbd);
9730
9731                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9732                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9733                 pbd->tcp_flags = pbd_tcp_flags(skb);
9734
9735                 if (xmit_type & XMIT_GSO_V4) {
9736                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9737                         pbd->tcp_pseudo_csum =
9738                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9739                                                           ip_hdr(skb)->daddr,
9740                                                           0, IPPROTO_TCP, 0));
9741
9742                 } else
9743                         pbd->tcp_pseudo_csum =
9744                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9745                                                         &ipv6_hdr(skb)->daddr,
9746                                                         0, IPPROTO_TCP, 0));
9747
9748                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9749         }
9750
9751         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9752                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9753
9754                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9755                 tx_bd = &fp->tx_desc_ring[bd_prod];
9756
9757                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9758                                        frag->size, PCI_DMA_TODEVICE);
9759
9760                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9761                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9762                 tx_bd->nbytes = cpu_to_le16(frag->size);
9763                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9764                 tx_bd->bd_flags.as_bitfield = 0;
9765
9766                 DP(NETIF_MSG_TX_QUEUED,
9767                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9768                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9769                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9770         }
9771
9772         /* now at last mark the BD as the last BD */
9773         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9774
9775         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9776            tx_bd, tx_bd->bd_flags.as_bitfield);
9777
9778         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9779
9780         /* ring the Tx doorbell, counting the next-page BD
9781          * if the packet's BD chain includes or ends on it
9782          */
9783         if (TX_BD_POFF(bd_prod) < nbd)
9784                 nbd++;
9785
9786         if (pbd)
9787                 DP(NETIF_MSG_TX_QUEUED,
9788                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9789                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9790                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9791                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9792                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9793
9794         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9795
9796         /*
9797          * Make sure that the BD data is updated before updating the producer
9798          * since FW might read the BD right after the producer is updated.
9799          * This is only applicable for weak-ordered memory model archs such
9800          * as IA-64. The following barrier is also mandatory since the FW
9801          * assumes packets must have BDs.
9802          */
9803         wmb();
9804
9805         fp->hw_tx_prods->bds_prod =
9806                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9807         mb(); /* FW restriction: must not reorder writing nbd and packets */
9808         fp->hw_tx_prods->packets_prod =
9809                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9810         DOORBELL(bp, FP_IDX(fp), 0);
9811
9812         mmiowb();
9813
9814         fp->tx_bd_prod += nbd;
9815         dev->trans_start = jiffies;
9816
9817         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9818                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9819                    if we put Tx into XOFF state. */
9820                 smp_mb();
9821                 netif_stop_queue(dev);
9822                 bp->eth_stats.driver_xoff++;
9823                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9824                         netif_wake_queue(dev);
9825         }
9826         fp->tx_pkt++;
9827
9828         return NETDEV_TX_OK;
9829 }
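/* The queue-stop logic at the tail of bnx2x_start_xmit() is the classic
 * lost-wakeup guard, mirrored here in miniature:
 *
 *      smp_mb();                       // publish tx_bd_prod before stopping
 *      netif_stop_queue(dev);
 *      if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *              netif_wake_queue(dev);  // tx_int freed BDs concurrently
 *
 * Without the re-check, a bnx2x_tx_int() that completed BDs between the
 * availability test and the stop would leave the queue asleep forever.
 */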
9830
9831 /* called with rtnl_lock */
9832 static int bnx2x_open(struct net_device *dev)
9833 {
9834         struct bnx2x *bp = netdev_priv(dev);
9835
9836         netif_carrier_off(dev);
9837
9838         bnx2x_set_power_state(bp, PCI_D0);
9839
9840         return bnx2x_nic_load(bp, LOAD_OPEN);
9841 }
9842
9843 /* called with rtnl_lock */
9844 static int bnx2x_close(struct net_device *dev)
9845 {
9846         struct bnx2x *bp = netdev_priv(dev);
9847
9848         /* Unload the driver, release IRQs */
9849         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9850         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9851                 if (!CHIP_REV_IS_SLOW(bp))
9852                         bnx2x_set_power_state(bp, PCI_D3hot);
9853
9854         return 0;
9855 }
9856
9857 /* called with netif_tx_lock from set_multicast */
9858 static void bnx2x_set_rx_mode(struct net_device *dev)
9859 {
9860         struct bnx2x *bp = netdev_priv(dev);
9861         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9862         int port = BP_PORT(bp);
9863
9864         if (bp->state != BNX2X_STATE_OPEN) {
9865                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9866                 return;
9867         }
9868
9869         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9870
9871         if (dev->flags & IFF_PROMISC)
9872                 rx_mode = BNX2X_RX_MODE_PROMISC;
9873
9874         else if ((dev->flags & IFF_ALLMULTI) ||
9875                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9876                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9877
9878         else { /* some multicasts */
9879                 if (CHIP_IS_E1(bp)) {
9880                         int i, old, offset;
9881                         struct dev_mc_list *mclist;
9882                         struct mac_configuration_cmd *config =
9883                                                 bnx2x_sp(bp, mcast_config);
9884
9885                         for (i = 0, mclist = dev->mc_list;
9886                              mclist && (i < dev->mc_count);
9887                              i++, mclist = mclist->next) {
9888
9889                                 config->config_table[i].
9890                                         cam_entry.msb_mac_addr =
9891                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9892                                 config->config_table[i].
9893                                         cam_entry.middle_mac_addr =
9894                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9895                                 config->config_table[i].
9896                                         cam_entry.lsb_mac_addr =
9897                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9898                                 config->config_table[i].cam_entry.flags =
9899                                                         cpu_to_le16(port);
9900                                 config->config_table[i].
9901                                         target_table_entry.flags = 0;
9902                                 config->config_table[i].
9903                                         target_table_entry.client_id = 0;
9904                                 config->config_table[i].
9905                                         target_table_entry.vlan_id = 0;
9906
9907                                 DP(NETIF_MSG_IFUP,
9908                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9909                                    config->config_table[i].
9910                                                 cam_entry.msb_mac_addr,
9911                                    config->config_table[i].
9912                                                 cam_entry.middle_mac_addr,
9913                                    config->config_table[i].
9914                                                 cam_entry.lsb_mac_addr);
9915                         }
9916                         old = config->hdr.length_6b;
9917                         if (old > i) {
9918                                 for (; i < old; i++) {
9919                                         if (CAM_IS_INVALID(config->
9920                                                            config_table[i])) {
9921                                                 /* already invalidated */
9922                                                 break;
9923                                         }
9924                                         /* invalidate */
9925                                         CAM_INVALIDATE(config->
9926                                                        config_table[i]);
9927                                 }
9928                         }
9929
9930                         if (CHIP_REV_IS_SLOW(bp))
9931                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9932                         else
9933                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9934
9935                         config->hdr.length_6b = i;
9936                         config->hdr.offset = offset;
9937                         config->hdr.client_id = BP_CL_ID(bp);
9938                         config->hdr.reserved1 = 0;
9939
9940                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9941                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9942                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9943                                       0);
9944                 } else { /* E1H */
9945                         /* Accept one or more multicasts */
9946                         struct dev_mc_list *mclist;
9947                         u32 mc_filter[MC_HASH_SIZE];
9948                         u32 crc, bit, regidx;
9949                         int i;
9950
9951                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9952
9953                         for (i = 0, mclist = dev->mc_list;
9954                              mclist && (i < dev->mc_count);
9955                              i++, mclist = mclist->next) {
9956
9957                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9958                                    mclist->dmi_addr);
9959
9960                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9961                                 bit = (crc >> 24) & 0xff;
9962                                 regidx = bit >> 5;
9963                                 bit &= 0x1f;
9964                                 mc_filter[regidx] |= (1 << bit);
9965                         }
9966
9967                         for (i = 0; i < MC_HASH_SIZE; i++)
9968                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9969                                        mc_filter[i]);
9970                 }
9971         }
9972
9973         bp->rx_mode = rx_mode;
9974         bnx2x_set_storm_rx_mode(bp);
9975 }
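/* The E1H branch above hashes each multicast MAC into a 256-bit filter
 * spread over eight 32-bit MC_HASH registers.  With a hypothetical
 * crc32c_le() result of 0xA7123456: bit = 0xA7 = 167, regidx = 167 >> 5 = 5,
 * bit &= 0x1f leaves 7, so bit 7 of mc_filter[5] gets set.
 */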
9976
9977 /* called with rtnl_lock */
9978 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9979 {
9980         struct sockaddr *addr = p;
9981         struct bnx2x *bp = netdev_priv(dev);
9982
9983         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9984                 return -EINVAL;
9985
9986         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9987         if (netif_running(dev)) {
9988                 if (CHIP_IS_E1(bp))
9989                         bnx2x_set_mac_addr_e1(bp, 1);
9990                 else
9991                         bnx2x_set_mac_addr_e1h(bp, 1);
9992         }
9993
9994         return 0;
9995 }
9996
9997 /* called with rtnl_lock */
9998 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9999 {
10000         struct mii_ioctl_data *data = if_mii(ifr);
10001         struct bnx2x *bp = netdev_priv(dev);
10002         int port = BP_PORT(bp);
10003         int err;
10004
10005         switch (cmd) {
10006         case SIOCGMIIPHY:
10007                 data->phy_id = bp->port.phy_addr;
10008
10009                 /* fallthrough */
10010
10011         case SIOCGMIIREG: {
10012                 u16 mii_regval;
10013
10014                 if (!netif_running(dev))
10015                         return -EAGAIN;
10016
10017                 mutex_lock(&bp->port.phy_mutex);
10018                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10019                                       DEFAULT_PHY_DEV_ADDR,
10020                                       (data->reg_num & 0x1f), &mii_regval);
10021                 data->val_out = mii_regval;
10022                 mutex_unlock(&bp->port.phy_mutex);
10023                 return err;
10024         }
10025
10026         case SIOCSMIIREG:
10027                 if (!capable(CAP_NET_ADMIN))
10028                         return -EPERM;
10029
10030                 if (!netif_running(dev))
10031                         return -EAGAIN;
10032
10033                 mutex_lock(&bp->port.phy_mutex);
10034                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10035                                        DEFAULT_PHY_DEV_ADDR,
10036                                        (data->reg_num & 0x1f), data->val_in);
10037                 mutex_unlock(&bp->port.phy_mutex);
10038                 return err;
10039
10040         default:
10041                 /* do nothing */
10042                 break;
10043         }
10044
10045         return -EOPNOTSUPP;
10046 }
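/* An illustrative userspace counterpart to the MII ioctls handled above
 * (a sketch only - interface name and register number are placeholders):
 *
 *      struct ifreq ifr;
 *      struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifr, 0, sizeof(ifr));
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ioctl(fd, SIOCGMIIPHY, &ifr);   // fills mii->phy_id
 *      mii->reg_num = 1;               // MII register to read
 *      ioctl(fd, SIOCGMIIREG, &ifr);   // value returned in mii->val_out
 */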
10047
10048 /* called with rtnl_lock */
10049 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10050 {
10051         struct bnx2x *bp = netdev_priv(dev);
10052         int rc = 0;
10053
10054         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10055             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10056                 return -EINVAL;
10057
10058         /* This does not race with packet allocation
10059          * because the actual alloc size is
10060          * only updated as part of load
10061          */
10062         dev->mtu = new_mtu;
10063
10064         if (netif_running(dev)) {
10065                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10066                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10067         }
10068
10069         return rc;
10070 }
10071
10072 static void bnx2x_tx_timeout(struct net_device *dev)
10073 {
10074         struct bnx2x *bp = netdev_priv(dev);
10075
10076 #ifdef BNX2X_STOP_ON_ERROR
10077         if (!bp->panic)
10078                 bnx2x_panic();
10079 #endif
10080         /* This allows the netif to be shut down gracefully before resetting */
10081         schedule_work(&bp->reset_task);
10082 }
10083
10084 #ifdef BCM_VLAN
10085 /* called with rtnl_lock */
10086 static void bnx2x_vlan_rx_register(struct net_device *dev,
10087                                    struct vlan_group *vlgrp)
10088 {
10089         struct bnx2x *bp = netdev_priv(dev);
10090
10091         bp->vlgrp = vlgrp;
10092
10093         /* Set flags according to the required capabilities */
10094         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10095
10096         if (dev->features & NETIF_F_HW_VLAN_TX)
10097                 bp->flags |= HW_VLAN_TX_FLAG;
10098
10099         if (dev->features & NETIF_F_HW_VLAN_RX)
10100                 bp->flags |= HW_VLAN_RX_FLAG;
10101
10102         if (netif_running(dev))
10103                 bnx2x_set_client_config(bp);
10104 }
10105
10106 #endif
10107
10108 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10109 static void poll_bnx2x(struct net_device *dev)
10110 {
10111         struct bnx2x *bp = netdev_priv(dev);
10112
10113         disable_irq(bp->pdev->irq);
10114         bnx2x_interrupt(bp->pdev->irq, dev);
10115         enable_irq(bp->pdev->irq);
10116 }
10117 #endif
10118
10119 static const struct net_device_ops bnx2x_netdev_ops = {
10120         .ndo_open               = bnx2x_open,
10121         .ndo_stop               = bnx2x_close,
10122         .ndo_start_xmit         = bnx2x_start_xmit,
10123         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10124         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10125         .ndo_validate_addr      = eth_validate_addr,
10126         .ndo_do_ioctl           = bnx2x_ioctl,
10127         .ndo_change_mtu         = bnx2x_change_mtu,
10128         .ndo_tx_timeout         = bnx2x_tx_timeout,
10129 #ifdef BCM_VLAN
10130         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10131 #endif
10132 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10133         .ndo_poll_controller    = poll_bnx2x,
10134 #endif
10135 };
10136
10137
10138 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10139                                     struct net_device *dev)
10140 {
10141         struct bnx2x *bp;
10142         int rc;
10143
10144         SET_NETDEV_DEV(dev, &pdev->dev);
10145         bp = netdev_priv(dev);
10146
10147         bp->dev = dev;
10148         bp->pdev = pdev;
10149         bp->flags = 0;
10150         bp->func = PCI_FUNC(pdev->devfn);
10151
10152         rc = pci_enable_device(pdev);
10153         if (rc) {
10154                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10155                 goto err_out;
10156         }
10157
10158         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10159                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10160                        " aborting\n");
10161                 rc = -ENODEV;
10162                 goto err_out_disable;
10163         }
10164
10165         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10166                 printk(KERN_ERR PFX "Cannot find second PCI device"
10167                        " base address, aborting\n");
10168                 rc = -ENODEV;
10169                 goto err_out_disable;
10170         }
10171
10172         if (atomic_read(&pdev->enable_cnt) == 1) {
10173                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10174                 if (rc) {
10175                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10176                                " aborting\n");
10177                         goto err_out_disable;
10178                 }
10179
10180                 pci_set_master(pdev);
10181                 pci_save_state(pdev);
10182         }
10183
10184         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10185         if (bp->pm_cap == 0) {
10186                 printk(KERN_ERR PFX "Cannot find power management"
10187                        " capability, aborting\n");
10188                 rc = -EIO;
10189                 goto err_out_release;
10190         }
10191
10192         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10193         if (bp->pcie_cap == 0) {
10194                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10195                        " aborting\n");
10196                 rc = -EIO;
10197                 goto err_out_release;
10198         }
10199
10200         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10201                 bp->flags |= USING_DAC_FLAG;
10202                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10203                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10204                                " failed, aborting\n");
10205                         rc = -EIO;
10206                         goto err_out_release;
10207                 }
10208
10209         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10210                 printk(KERN_ERR PFX "System does not support DMA,"
10211                        " aborting\n");
10212                 rc = -EIO;
10213                 goto err_out_release;
10214         }
10215
10216         dev->mem_start = pci_resource_start(pdev, 0);
10217         dev->base_addr = dev->mem_start;
10218         dev->mem_end = pci_resource_end(pdev, 0);
10219
10220         dev->irq = pdev->irq;
10221
10222         bp->regview = pci_ioremap_bar(pdev, 0);
10223         if (!bp->regview) {
10224                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10225                 rc = -ENOMEM;
10226                 goto err_out_release;
10227         }
10228
10229         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10230                                         min_t(u64, BNX2X_DB_SIZE,
10231                                               pci_resource_len(pdev, 2)));
10232         if (!bp->doorbells) {
10233                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10234                 rc = -ENOMEM;
10235                 goto err_out_unmap;
10236         }
10237
10238         bnx2x_set_power_state(bp, PCI_D0);
10239
10240         /* clean indirect addresses */
10241         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10242                                PCICFG_VENDOR_ID_OFFSET);
10243         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10244         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10245         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10246         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10247
10248         dev->watchdog_timeo = TX_TIMEOUT;
10249
10250         dev->netdev_ops = &bnx2x_netdev_ops;
10251         dev->ethtool_ops = &bnx2x_ethtool_ops;
10252         dev->features |= NETIF_F_SG;
10253         dev->features |= NETIF_F_HW_CSUM;
10254         if (bp->flags & USING_DAC_FLAG)
10255                 dev->features |= NETIF_F_HIGHDMA;
10256 #ifdef BCM_VLAN
10257         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10258         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10259 #endif
10260         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10261         dev->features |= NETIF_F_TSO6;
10262
10263         return 0;
10264
10265 err_out_unmap:
10266         if (bp->regview) {
10267                 iounmap(bp->regview);
10268                 bp->regview = NULL;
10269         }
10270         if (bp->doorbells) {
10271                 iounmap(bp->doorbells);
10272                 bp->doorbells = NULL;
10273         }
10274
10275 err_out_release:
10276         if (atomic_read(&pdev->enable_cnt) == 1)
10277                 pci_release_regions(pdev);
10278
10279 err_out_disable:
10280         pci_disable_device(pdev);
10281         pci_set_drvdata(pdev, NULL);
10282
10283 err_out:
10284         return rc;
10285 }
10286
10287 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10288 {
10289         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10290
10291         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10292         return val;
10293 }
10294
10295 /* return value: 1 = 2.5GHz, 2 = 5GHz */
10296 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10297 {
10298         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10299
10300         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10301         return val;
10302 }
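/* Both decodes feed the probe banner printed by bnx2x_init_one(); e.g. a
 * x8 link trained at Gen2 reads back width 8 and speed 2 and is reported
 * as "PCI-E x8 5GHz (Gen2)".
 */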
10303
10304 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10305                                     const struct pci_device_id *ent)
10306 {
10307         static int version_printed;
10308         struct net_device *dev = NULL;
10309         struct bnx2x *bp;
10310         int rc;
10311
10312         if (version_printed++ == 0)
10313                 printk(KERN_INFO "%s", version);
10314
10315         /* dev zeroed in alloc_etherdev */
10316         dev = alloc_etherdev(sizeof(*bp));
10317         if (!dev) {
10318                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10319                 return -ENOMEM;
10320         }
10321
10322         bp = netdev_priv(dev);
10323         bp->msglevel = debug;
10324
10325         rc = bnx2x_init_dev(pdev, dev);
10326         if (rc < 0) {
10327                 free_netdev(dev);
10328                 return rc;
10329         }
10330
10331         pci_set_drvdata(pdev, dev);
10332
10333         rc = bnx2x_init_bp(bp);
10334         if (rc)
10335                 goto init_one_exit;
10336
10337         rc = register_netdev(dev);
10338         if (rc) {
10339                 dev_err(&pdev->dev, "Cannot register net device\n");
10340                 goto init_one_exit;
10341         }
10342
10343         bp->common.name = board_info[ent->driver_data].name;
10344         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10345                " IRQ %d, ", dev->name, bp->common.name,
10346                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10347                bnx2x_get_pcie_width(bp),
10348                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10349                dev->base_addr, bp->pdev->irq);
10350         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10351         return 0;
10352
10353 init_one_exit:
10354         if (bp->regview)
10355                 iounmap(bp->regview);
10356
10357         if (bp->doorbells)
10358                 iounmap(bp->doorbells);
10359
10360         free_netdev(dev);
10361
10362         if (atomic_read(&pdev->enable_cnt) == 1)
10363                 pci_release_regions(pdev);
10364
10365         pci_disable_device(pdev);
10366         pci_set_drvdata(pdev, NULL);
10367
10368         return rc;
10369 }
10370
10371 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10372 {
10373         struct net_device *dev = pci_get_drvdata(pdev);
10374         struct bnx2x *bp;
10375
10376         if (!dev) {
10377                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10378                 return;
10379         }
10380         bp = netdev_priv(dev);
10381
10382         unregister_netdev(dev);
10383
10384         if (bp->regview)
10385                 iounmap(bp->regview);
10386
10387         if (bp->doorbells)
10388                 iounmap(bp->doorbells);
10389
10390         free_netdev(dev);
10391
10392         if (atomic_read(&pdev->enable_cnt) == 1)
10393                 pci_release_regions(pdev);
10394
10395         pci_disable_device(pdev);
10396         pci_set_drvdata(pdev, NULL);
10397 }
10398
10399 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10400 {
10401         struct net_device *dev = pci_get_drvdata(pdev);
10402         struct bnx2x *bp;
10403
10404         if (!dev) {
10405                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10406                 return -ENODEV;
10407         }
10408         bp = netdev_priv(dev);
10409
10410         rtnl_lock();
10411
10412         pci_save_state(pdev);
10413
10414         if (!netif_running(dev)) {
10415                 rtnl_unlock();
10416                 return 0;
10417         }
10418
10419         netif_device_detach(dev);
10420
10421         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10422
10423         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10424
10425         rtnl_unlock();
10426
10427         return 0;
10428 }
10429
10430 static int bnx2x_resume(struct pci_dev *pdev)
10431 {
10432         struct net_device *dev = pci_get_drvdata(pdev);
10433         struct bnx2x *bp;
10434         int rc;
10435
10436         if (!dev) {
10437                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10438                 return -ENODEV;
10439         }
10440         bp = netdev_priv(dev);
10441
10442         rtnl_lock();
10443
10444         pci_restore_state(pdev);
10445
10446         if (!netif_running(dev)) {
10447                 rtnl_unlock();
10448                 return 0;
10449         }
10450
10451         bnx2x_set_power_state(bp, PCI_D0);
10452         netif_device_attach(dev);
10453
10454         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10455
10456         rtnl_unlock();
10457
10458         return rc;
10459 }
10460
10461 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10462 {
10463         int i;
10464
10465         bp->state = BNX2X_STATE_ERROR;
10466
10467         bp->rx_mode = BNX2X_RX_MODE_NONE;
10468
10469         bnx2x_netif_stop(bp, 0);
10470
10471         del_timer_sync(&bp->timer);
10472         bp->stats_state = STATS_STATE_DISABLED;
10473         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10474
10475         /* Release IRQs */
10476         bnx2x_free_irq(bp);
10477
10478         if (CHIP_IS_E1(bp)) {
10479                 struct mac_configuration_cmd *config =
10480                                                 bnx2x_sp(bp, mcast_config);
10481
10482                 for (i = 0; i < config->hdr.length_6b; i++)
10483                         CAM_INVALIDATE(config->config_table[i]);
10484         }
10485
10486         /* Free SKBs, SGEs, TPA pool and driver internals */
10487         bnx2x_free_skbs(bp);
10488         for_each_queue(bp, i)
10489                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10490         for_each_queue(bp, i)
10491                 netif_napi_del(&bnx2x_fp(bp, i, napi));
10492         bnx2x_free_mem(bp);
10493
10494         bp->state = BNX2X_STATE_CLOSED;
10495
10496         netif_carrier_off(bp->dev);
10497
10498         return 0;
10499 }
10500
10501 static void bnx2x_eeh_recover(struct bnx2x *bp)
10502 {
10503         u32 val;
10504
10505         mutex_init(&bp->port.phy_mutex);
10506
10507         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10508         bp->link_params.shmem_base = bp->common.shmem_base;
10509         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10510
10511         if (!bp->common.shmem_base ||
10512             (bp->common.shmem_base < 0xA0000) ||
10513             (bp->common.shmem_base >= 0xC0000)) {
10514                 BNX2X_DEV_INFO("MCP not active\n");
10515                 bp->flags |= NO_MCP_FLAG;
10516                 return;
10517         }
10518
10519         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10520         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10521                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10522                 BNX2X_ERR("BAD MCP validity signature\n");
10523
10524         if (!BP_NOMCP(bp)) {
10525                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10526                               & DRV_MSG_SEQ_NUMBER_MASK);
10527                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10528         }
10529 }
10530
10531 /**
10532  * bnx2x_io_error_detected - called when PCI error is detected
10533  * @pdev: Pointer to PCI device
10534  * @state: The current pci connection state
10535  *
10536  * This function is called after a PCI bus error affecting
10537  * this device has been detected.
10538  */
10539 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10540                                                 pci_channel_state_t state)
10541 {
10542         struct net_device *dev = pci_get_drvdata(pdev);
10543         struct bnx2x *bp = netdev_priv(dev);
10544
10545         rtnl_lock();
10546
10547         netif_device_detach(dev);
10548
10549         if (netif_running(dev))
10550                 bnx2x_eeh_nic_unload(bp);
10551
10552         pci_disable_device(pdev);
10553
10554         rtnl_unlock();
10555
10556         /* Request a slot reset */
10557         return PCI_ERS_RESULT_NEED_RESET;
10558 }
10559
10560 /**
10561  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10562  * @pdev: Pointer to PCI device
10563  *
10564  * Restart the card from scratch, as if from a cold boot.
10565  */
10566 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10567 {
10568         struct net_device *dev = pci_get_drvdata(pdev);
10569         struct bnx2x *bp = netdev_priv(dev);
10570
10571         rtnl_lock();
10572
10573         if (pci_enable_device(pdev)) {
10574                 dev_err(&pdev->dev,
10575                         "Cannot re-enable PCI device after reset\n");
10576                 rtnl_unlock();
10577                 return PCI_ERS_RESULT_DISCONNECT;
10578         }
10579
10580         pci_set_master(pdev);
10581         pci_restore_state(pdev);
10582
10583         if (netif_running(dev))
10584                 bnx2x_set_power_state(bp, PCI_D0);
10585
10586         rtnl_unlock();
10587
10588         return PCI_ERS_RESULT_RECOVERED;
10589 }
10590
10591 /**
10592  * bnx2x_io_resume - called when traffic can start flowing again
10593  * @pdev: Pointer to PCI device
10594  *
10595  * This callback is called when the error recovery driver tells us that
10596  * it's OK to resume normal operation.
10597  */
10598 static void bnx2x_io_resume(struct pci_dev *pdev)
10599 {
10600         struct net_device *dev = pci_get_drvdata(pdev);
10601         struct bnx2x *bp = netdev_priv(dev);
10602
10603         rtnl_lock();
10604
10605         bnx2x_eeh_recover(bp);
10606
10607         if (netif_running(dev))
10608                 bnx2x_nic_load(bp, LOAD_NORMAL);
10609
10610         netif_device_attach(dev);
10611
10612         rtnl_unlock();
10613 }
10614
10615 static struct pci_error_handlers bnx2x_err_handler = {
10616         .error_detected = bnx2x_io_error_detected,
10617         .slot_reset = bnx2x_io_slot_reset,
10618         .resume = bnx2x_io_resume,
10619 };
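/* The PCI error-recovery core drives these callbacks in order:
 * ->error_detected() returns PCI_ERS_RESULT_NEED_RESET, the core resets
 * the slot and calls ->slot_reset(), and once that reports
 * PCI_ERS_RESULT_RECOVERED, traffic is restarted through ->resume().
 */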
10620
10621 static struct pci_driver bnx2x_pci_driver = {
10622         .name        = DRV_MODULE_NAME,
10623         .id_table    = bnx2x_pci_tbl,
10624         .probe       = bnx2x_init_one,
10625         .remove      = __devexit_p(bnx2x_remove_one),
10626         .suspend     = bnx2x_suspend,
10627         .resume      = bnx2x_resume,
10628         .err_handler = &bnx2x_err_handler,
10629 };
10630
10631 static int __init bnx2x_init(void)
10632 {
10633         bnx2x_wq = create_singlethread_workqueue("bnx2x");
10634         if (bnx2x_wq == NULL) {
10635                 printk(KERN_ERR PFX "Cannot create workqueue\n");
10636                 return -ENOMEM;
10637         }
10638
10639         return pci_register_driver(&bnx2x_pci_driver);
10640 }
10641
10642 static void __exit bnx2x_cleanup(void)
10643 {
10644         pci_unregister_driver(&bnx2x_pci_driver);
10645
10646         destroy_workqueue(bnx2x_wq);
10647 }
10648
10649 module_init(bnx2x_init);
10650 module_exit(bnx2x_cleanup);
10651