/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-8"
#define DRV_MODULE_RELDATE      "2010/04/01"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
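
/*
 * Illustrative sketch (editor's addition, not part of the driver): the two
 * accessors above tunnel GRC register accesses through PCI config space,
 * which works even before the BAR-mapped window is usable; note that both
 * restore PCICFG_GRC_ADDRESS to PCICFG_VENDOR_ID_OFFSET afterwards so a
 * stray config read cannot be misrouted.  The GRC offset used below is
 * purely hypothetical.
 */
#if 0
static void bnx2x_example_indirect_access(struct bnx2x *bp)
{
        u32 val;

        bnx2x_reg_wr_ind(bp, 0x2000 /* hypothetical GRC offset */, 0x1);
        val = bnx2x_reg_rd_ind(bp, 0x2000);
        DP(BNX2X_MSG_OFF, "indirect readback 0x%08x\n", val);
}
#endif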

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
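
/*
 * Worked example (editor's addition): DMAE lengths are counted in 32-bit
 * words while offsets advance in bytes, hence the "* 4" above.  Assuming a
 * hypothetical dmae_wr_max of 0x400 words, a 0x500-word write is issued as
 * one DMAE of 0x400 words followed by one of 0x100 words, with both the
 * PCI source address and the GRC destination advanced by 0x400 * 4 =
 * 0x1000 bytes between the two commands.
 */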

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - cannot dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}
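
/*
 * Editor's note on the dump above: the area just below shmem holds a
 * circular ASCII log written by the MCP.  The word at shmem_base - 0x800 + 4
 * is the current write mark, expressed in the MCP's 0x08000000-based view;
 * it is rounded up to a 4-byte boundary and rebased to the host GRC view
 * via MCP_REG_MCPR_SCRATCH.  The two loops then print the log in
 * chronological order - from the mark to the end of the area, then wrapping
 * from the start of the area back up to the mark - and htonl() preserves
 * byte order so each 32-byte chunk prints as a NUL-terminated string.
 */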

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}
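
/*
 * Illustrative sketch (editor's addition): a typical caller pattern for the
 * trylock above.  The resource id is hypothetical - real callers pass one
 * of the driver's HW_LOCK_RESOURCE_* values - and the owner must later drop
 * the lock via the matching release helper (not shown in this excerpt).
 */
#if 0
static void bnx2x_example_trylock(struct bnx2x *bp)
{
        u32 resource = 0; /* hypothetical HW_LOCK_RESOURCE_* value */

        if (bnx2x_trylock_hw_lock(bp, resource)) {
                /* ... touch the shared hardware resource, then release ... */
        } else {
                /* lock is held elsewhere - back off and retry later */
        }
}
#endif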

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speed up dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
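
/*
 * Editor's note on the BD accounting above: the start BD's nbd field counts
 * every BD the packet occupies.  One is consumed up front by unmapping the
 * start BD itself; the parse BD and, for TSO, the split-header BD carry no
 * DMA mapping and are merely stepped over; whatever remains are frag BDs,
 * unmapped one by one in the loop, while NEXT_TX_IDX() takes care of hopping
 * over the per-page "next page" BDs.
 */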

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
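
/*
 * Worked example (editor's addition, hypothetical sizes): with a 4096-BD
 * ring split over 16 pages, prod = 100 and cons = 40, the packet BDs in
 * flight number 60, but "used" is charged 60 + 16 = 76.  Treating the 16
 * per-page "next page" BDs as permanently consumed keeps the returned
 * availability conservative, so a caller that trusts this count can never
 * oversubscribe the ring.
 */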

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent re-enabling the queue
                 * while it's empty. This could happen if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}
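
/*
 * Schematic of the stop/wake race closed above (editor's addition; the
 * producer-side steps are paraphrased from the comment in bnx2x_tx_int(),
 * not quoted from code in this excerpt):
 *
 *      producer (start_xmit)           consumer (bnx2x_tx_int)
 *      ---------------------           -----------------------
 *      ring fills up                   advances tx_bd_cons
 *      stops the queue                 smp_mb()
 *      memory barrier                  sees the queue stopped
 *      re-checks tx avail              takes the tx lock, re-checks avail
 *                                      wakes the queue only if still valid
 *
 * Without the smp_mb() after the tx_bd_cons update, the producer could
 * stop the queue just after the consumer's last check and never be woken.
 */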

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* Note that we are not allocating a new skb: we are just moving one from
 * cons to prod.  Since no new mapping is created, there is no need to
 * check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all ones: it's faster to compare an element
           to 0 than to all ones */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page: these correspond
           to the "next" element, are never reported by the FW, and so
           must be excluded from the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
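
/*
 * Worked example (editor's addition, hypothetical geometry): with 64-bit
 * mask elements (RX_SGE_MASK_ELEM_SHIFT = 6, matching the sizeof(u64)
 * above) and a hypothetical RX_SGE_CNT of 512 per page, SGE index n lives
 * in word n >> 6, bit n & 63.  Indices 510/511, 1022/1023, ... are the
 * per-page "next" elements; bnx2x_clear_sge_mask_next_elems() holds their
 * bits at 0, so a mask word in the producer sweep of
 * bnx2x_update_sge_prod() can reach zero without the FW ever reporting
 * those indices.
 */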

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
1422
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1499
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable for weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes BDs must have buffers.
1533          */
1534         wmb();
1535
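        /* Copy the producers struct word by word into USTORM internal
           memory, where the FW picks it up */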
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
1547
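/*
 * RX completion handler: walk the completion queue for up to @budget
 * packets, dispatching slowpath CQEs, TPA start/stop events and
 * regular RX packets, then publish the new producers to the FW.
 */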
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549 {
1550         struct bnx2x *bp = fp->bp;
1551         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553         int rx_pkt = 0;
1554
1555 #ifdef BNX2X_STOP_ON_ERROR
1556         if (unlikely(bp->panic))
1557                 return 0;
1558 #endif
1559
1560         /* The CQ "next element" is the same size as a regular element,
1561            which is why simply incrementing past it here is OK */
1562         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564                 hw_comp_cons++;
1565
1566         bd_cons = fp->rx_bd_cons;
1567         bd_prod = fp->rx_bd_prod;
1568         bd_prod_fw = bd_prod;
1569         sw_comp_cons = fp->rx_comp_cons;
1570         sw_comp_prod = fp->rx_comp_prod;
1571
1572         /* Memory barrier necessary as speculative reads of the rx
1573          * buffer can be ahead of the index in the status block
1574          */
1575         rmb();
1576
1577         DP(NETIF_MSG_RX_STATUS,
1578            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1579            fp->index, hw_comp_cons, sw_comp_cons);
1580
1581         while (sw_comp_cons != hw_comp_cons) {
1582                 struct sw_rx_bd *rx_buf = NULL;
1583                 struct sk_buff *skb;
1584                 union eth_rx_cqe *cqe;
1585                 u8 cqe_fp_flags;
1586                 u16 len, pad;
1587
1588                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589                 bd_prod = RX_BD(bd_prod);
1590                 bd_cons = RX_BD(bd_cons);
1591
1592                 /* Prefetch the page containing the BD descriptor
1593                    at the producer's index. It will be needed when a new
1594                    skb is allocated */
1595                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596                                              (&fp->rx_desc_ring[bd_prod])) -
1597                                   PAGE_SIZE + 1));
1598
1599                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601
1602                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1603                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1604                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1605                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1606                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1607                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1608
1609                 /* is this a slowpath msg? */
1610                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1611                         bnx2x_sp_event(fp, cqe);
1612                         goto next_cqe;
1613
1614                 /* this is an rx packet */
1615                 } else {
1616                         rx_buf = &fp->rx_buf_ring[bd_cons];
1617                         skb = rx_buf->skb;
1618                         prefetch(skb);
1619                         prefetch((u8 *)skb + 256);
1620                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621                         pad = cqe->fast_path_cqe.placement_offset;
1622
1623                         /* If CQE is marked both TPA_START and TPA_END
1624                            it is a non-TPA CQE */
1625                         if ((!fp->disable_tpa) &&
1626                             (TPA_TYPE(cqe_fp_flags) !=
1627                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1628                                 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631                                         DP(NETIF_MSG_RX_STATUS,
1632                                            "calling tpa_start on queue %d\n",
1633                                            queue);
1634
1635                                         bnx2x_tpa_start(fp, queue, skb,
1636                                                         bd_cons, bd_prod);
1637                                         goto next_rx;
1638                                 }
1639
1640                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641                                         DP(NETIF_MSG_RX_STATUS,
1642                                            "calling tpa_stop on queue %d\n",
1643                                            queue);
1644
1645                                         if (!BNX2X_RX_SUM_FIX(cqe))
1646                                                 BNX2X_ERR("STOP on non-TCP "
1647                                                           "data\n");
1648
1649                                         /* This is the size of the linear
1650                                            data on this skb */
1651                                         len = le16_to_cpu(cqe->fast_path_cqe.
1652                                                                 len_on_bd);
1653                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1654                                                     len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1656                                         if (bp->panic)
1657                                                 return 0;
1658 #endif
1659
1660                                         bnx2x_update_sge_prod(fp,
1661                                                         &cqe->fast_path_cqe);
1662                                         goto next_cqe;
1663                                 }
1664                         }
1665
1666                         dma_sync_single_for_device(&bp->pdev->dev,
1667                                         dma_unmap_addr(rx_buf, mapping),
1668                                                    pad + RX_COPY_THRESH,
1669                                                    DMA_FROM_DEVICE);
1670                         prefetch(skb);
1671                         prefetch(((char *)(skb)) + 128);
1672
1673                         /* is this an error packet? */
1674                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1675                                 DP(NETIF_MSG_RX_ERR,
1676                                    "ERROR  flags %x  rx packet %u\n",
1677                                    cqe_fp_flags, sw_comp_cons);
1678                                 fp->eth_q_stats.rx_err_discard_pkt++;
1679                                 goto reuse_rx;
1680                         }
1681
1682                         /* Since we don't have a jumbo ring,
1683                          * copy small packets if the MTU exceeds 1500
1684                          */
1685                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1686                             (len <= RX_COPY_THRESH)) {
1687                                 struct sk_buff *new_skb;
1688
1689                                 new_skb = netdev_alloc_skb(bp->dev,
1690                                                            len + pad);
1691                                 if (new_skb == NULL) {
1692                                         DP(NETIF_MSG_RX_ERR,
1693                                            "ERROR  packet dropped "
1694                                            "because of alloc failure\n");
1695                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1696                                         goto reuse_rx;
1697                                 }
1698
1699                                 /* aligned copy */
1700                                 skb_copy_from_linear_data_offset(skb, pad,
1701                                                     new_skb->data + pad, len);
1702                                 skb_reserve(new_skb, pad);
1703                                 skb_put(new_skb, len);
1704
1705                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1706
1707                                 skb = new_skb;
1708
1709                         } else
1710                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1711                                 dma_unmap_single(&bp->pdev->dev,
1712                                         dma_unmap_addr(rx_buf, mapping),
1713                                                  bp->rx_buf_size,
1714                                                  DMA_FROM_DEVICE);
1715                                 skb_reserve(skb, pad);
1716                                 skb_put(skb, len);
1717
1718                         } else {
1719                                 DP(NETIF_MSG_RX_ERR,
1720                                    "ERROR  packet dropped because "
1721                                    "of alloc failure\n");
1722                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1723 reuse_rx:
1724                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1725                                 goto next_rx;
1726                         }
1727
1728                         skb->protocol = eth_type_trans(skb, bp->dev);
1729
1730                         skb->ip_summed = CHECKSUM_NONE;
1731                         if (bp->rx_csum) {
1732                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1733                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1734                                 else
1735                                         fp->eth_q_stats.hw_csum_err++;
1736                         }
1737                 }
1738
1739                 skb_record_rx_queue(skb, fp->index);
1740
1741 #ifdef BCM_VLAN
1742                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1743                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1744                      PARSING_FLAGS_VLAN))
1745                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1746                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1747                 else
1748 #endif
1749                         napi_gro_receive(&fp->napi, skb);
1750
1751
1752 next_rx:
1753                 rx_buf->skb = NULL;
1754
1755                 bd_cons = NEXT_RX_IDX(bd_cons);
1756                 bd_prod = NEXT_RX_IDX(bd_prod);
1757                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1758                 rx_pkt++;
1759 next_cqe:
1760                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1761                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1762
1763                 if (rx_pkt == budget)
1764                         break;
1765         } /* while */
1766
1767         fp->rx_bd_cons = bd_cons;
1768         fp->rx_bd_prod = bd_prod_fw;
1769         fp->rx_comp_cons = sw_comp_cons;
1770         fp->rx_comp_prod = sw_comp_prod;
1771
1772         /* Update producers */
1773         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1774                              fp->rx_sge_prod);
1775
1776         fp->rx_pkt += rx_pkt;
1777         fp->rx_calls++;
1778
1779         return rx_pkt;
1780 }
1781
1782 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1783 {
1784         struct bnx2x_fastpath *fp = fp_cookie;
1785         struct bnx2x *bp = fp->bp;
1786
1787         /* Return here if interrupt is disabled */
1788         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1789                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1790                 return IRQ_HANDLED;
1791         }
1792
1793         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1794            fp->index, fp->sb_id);
1795         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1796
1797 #ifdef BNX2X_STOP_ON_ERROR
1798         if (unlikely(bp->panic))
1799                 return IRQ_HANDLED;
1800 #endif
1801
1802         /* Handle Rx and Tx according to MSI-X vector */
1803         prefetch(fp->rx_cons_sb);
1804         prefetch(fp->tx_cons_sb);
1805         prefetch(&fp->status_blk->u_status_block.status_block_index);
1806         prefetch(&fp->status_blk->c_status_block.status_block_index);
1807         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1808
1809         return IRQ_HANDLED;
1810 }
1811
1812 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1813 {
1814         struct bnx2x *bp = netdev_priv(dev_instance);
1815         u16 status = bnx2x_ack_int(bp);
1816         u16 mask;
1817         int i;
1818
1819         /* Return here if interrupt is shared and it's not for us */
1820         if (unlikely(status == 0)) {
1821                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1822                 return IRQ_NONE;
1823         }
1824         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1825
1826         /* Return here if interrupt is disabled */
1827         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1828                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1829                 return IRQ_HANDLED;
1830         }
1831
1832 #ifdef BNX2X_STOP_ON_ERROR
1833         if (unlikely(bp->panic))
1834                 return IRQ_HANDLED;
1835 #endif
1836
1837         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1838                 struct bnx2x_fastpath *fp = &bp->fp[i];
1839
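                /* Each status block owns one bit of the aggregated
                   interrupt status; bit 0 belongs to the slowpath
                   (see below), so fastpath SB bits start at 0x2 */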
1840                 mask = 0x2 << fp->sb_id;
1841                 if (status & mask) {
1842                         /* Handle Rx and Tx according to SB id */
1843                         prefetch(fp->rx_cons_sb);
1844                         prefetch(&fp->status_blk->u_status_block.
1845                                                 status_block_index);
1846                         prefetch(fp->tx_cons_sb);
1847                         prefetch(&fp->status_blk->c_status_block.
1848                                                 status_block_index);
1849                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1850                         status &= ~mask;
1851                 }
1852         }
1853
1854 #ifdef BCM_CNIC
1855         mask = 0x2 << CNIC_SB_ID(bp);
1856         if (status & (mask | 0x1)) {
1857                 struct cnic_ops *c_ops = NULL;
1858
1859                 rcu_read_lock();
1860                 c_ops = rcu_dereference(bp->cnic_ops);
1861                 if (c_ops)
1862                         c_ops->cnic_handler(bp->cnic_data, NULL);
1863                 rcu_read_unlock();
1864
1865                 status &= ~mask;
1866         }
1867 #endif
1868
1869         if (unlikely(status & 0x1)) {
1870                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1871
1872                 status &= ~0x1;
1873                 if (!status)
1874                         return IRQ_HANDLED;
1875         }
1876
1877         if (unlikely(status))
1878                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1879                    status);
1880
1881         return IRQ_HANDLED;
1882 }
1883
1884 /* end of fast path */
1885
1886 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1887
1888 /* Link */
1889
1890 /*
1891  * General service functions
1892  */
1893
1894 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1895 {
1896         u32 lock_status;
1897         u32 resource_bit = (1 << resource);
1898         int func = BP_FUNC(bp);
1899         u32 hw_lock_control_reg;
1900         int cnt;
1901
1902         /* Validating that the resource is within range */
1903         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1904                 DP(NETIF_MSG_HW,
1905                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1906                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1907                 return -EINVAL;
1908         }
1909
1910         if (func <= 5) {
1911                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1912         } else {
1913                 hw_lock_control_reg =
1914                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1915         }
1916
1917         /* Validating that the resource is not already taken */
1918         lock_status = REG_RD(bp, hw_lock_control_reg);
1919         if (lock_status & resource_bit) {
1920                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1921                    lock_status, resource_bit);
1922                 return -EEXIST;
1923         }
1924
1925         /* Try for 5 seconds, polling every 5ms */
1926         for (cnt = 0; cnt < 1000; cnt++) {
1927                 /* Try to acquire the lock */
1928                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
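                /* The read-back shows whether the set request written
                   at offset +4 took effect, i.e. whether the lock was
                   granted (presumably arbitrated by the HW among the
                   functions) */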
1929                 lock_status = REG_RD(bp, hw_lock_control_reg);
1930                 if (lock_status & resource_bit)
1931                         return 0;
1932
1933                 msleep(5);
1934         }
1935         DP(NETIF_MSG_HW, "Timeout\n");
1936         return -EAGAIN;
1937 }
1938
1939 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1940 {
1941         u32 lock_status;
1942         u32 resource_bit = (1 << resource);
1943         int func = BP_FUNC(bp);
1944         u32 hw_lock_control_reg;
1945
1946         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1947
1948         /* Validating that the resource is within range */
1949         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1950                 DP(NETIF_MSG_HW,
1951                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1952                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1953                 return -EINVAL;
1954         }
1955
1956         if (func <= 5) {
1957                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1958         } else {
1959                 hw_lock_control_reg =
1960                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1961         }
1962
1963         /* Validating that the resource is currently taken */
1964         lock_status = REG_RD(bp, hw_lock_control_reg);
1965         if (!(lock_status & resource_bit)) {
1966                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1967                    lock_status, resource_bit);
1968                 return -EFAULT;
1969         }
1970
1971         REG_WR(bp, hw_lock_control_reg, resource_bit);
1972         return 0;
1973 }
1974
1975 /* HW Lock for shared dual port PHYs */
1976 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1977 {
1978         mutex_lock(&bp->port.phy_mutex);
1979
1980         if (bp->port.need_hw_lock)
1981                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1982 }
1983
1984 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1985 {
1986         if (bp->port.need_hw_lock)
1987                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1988
1989         mutex_unlock(&bp->port.phy_mutex);
1990 }
1991
1992 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1993 {
1994         /* The GPIO should be swapped if swap register is set and active */
1995         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1996                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1997         int gpio_shift = gpio_num +
1998                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1999         u32 gpio_mask = (1 << gpio_shift);
2000         u32 gpio_reg;
2001         int value;
2002
2003         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2004                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2005                 return -EINVAL;
2006         }
2007
2008         /* read GPIO value */
2009         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2010
2011         /* get the requested pin value */
2012         if ((gpio_reg & gpio_mask) == gpio_mask)
2013                 value = 1;
2014         else
2015                 value = 0;
2016
2017         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2018
2019         return value;
2020 }
2021
2022 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2023 {
2024         /* The GPIO should be swapped if swap register is set and active */
2025         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2026                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2027         int gpio_shift = gpio_num +
2028                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2029         u32 gpio_mask = (1 << gpio_shift);
2030         u32 gpio_reg;
2031
2032         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2033                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2034                 return -EINVAL;
2035         }
2036
2037         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2038         /* read GPIO and mask except the float bits */
2039         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2040
2041         switch (mode) {
2042         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2043                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2044                    gpio_num, gpio_shift);
2045                 /* clear FLOAT and set CLR */
2046                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2047                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2048                 break;
2049
2050         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2051                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2052                    gpio_num, gpio_shift);
2053                 /* clear FLOAT and set SET */
2054                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2055                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2056                 break;
2057
2058         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2059                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2060                    gpio_num, gpio_shift);
2061                 /* set FLOAT */
2062                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2063                 break;
2064
2065         default:
2066                 break;
2067         }
2068
2069         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2070         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2071
2072         return 0;
2073 }
2074
2075 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2076 {
2077         /* The GPIO should be swapped if swap register is set and active */
2078         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2079                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2080         int gpio_shift = gpio_num +
2081                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2082         u32 gpio_mask = (1 << gpio_shift);
2083         u32 gpio_reg;
2084
2085         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2086                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2087                 return -EINVAL;
2088         }
2089
2090         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2091         /* read GPIO int */
2092         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2093
2094         switch (mode) {
2095         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2096                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2097                                    "output low\n", gpio_num, gpio_shift);
2098                 /* clear SET and set CLR */
2099                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2100                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2101                 break;
2102
2103         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2104                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2105                                    "output high\n", gpio_num, gpio_shift);
2106                 /* clear CLR and set SET */
2107                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2109                 break;
2110
2111         default:
2112                 break;
2113         }
2114
2115         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2116         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2117
2118         return 0;
2119 }
2120
2121 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2122 {
2123         u32 spio_mask = (1 << spio_num);
2124         u32 spio_reg;
2125
2126         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2127             (spio_num > MISC_REGISTERS_SPIO_7)) {
2128                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2129                 return -EINVAL;
2130         }
2131
2132         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2133         /* read SPIO and mask except the float bits */
2134         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2135
2136         switch (mode) {
2137         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2138                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2139                 /* clear FLOAT and set CLR */
2140                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2142                 break;
2143
2144         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2145                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2146                 /* clear FLOAT and set SET */
2147                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2149                 break;
2150
2151         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2152                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2153                 /* set FLOAT */
2154                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2155                 break;
2156
2157         default:
2158                 break;
2159         }
2160
2161         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2162         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2163
2164         return 0;
2165 }
2166
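/* Translate the negotiated IEEE pause advertisement bits into the
   corresponding ethtool ADVERTISED_* flags on the port */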
2167 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2168 {
2169         switch (bp->link_vars.ieee_fc &
2170                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2171         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2172                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2173                                           ADVERTISED_Pause);
2174                 break;
2175
2176         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2177                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2178                                          ADVERTISED_Pause);
2179                 break;
2180
2181         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2182                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2183                 break;
2184
2185         default:
2186                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2187                                           ADVERTISED_Pause);
2188                 break;
2189         }
2190 }
2191
2192 static void bnx2x_link_report(struct bnx2x *bp)
2193 {
2194         if (bp->flags & MF_FUNC_DIS) {
2195                 netif_carrier_off(bp->dev);
2196                 netdev_err(bp->dev, "NIC Link is Down\n");
2197                 return;
2198         }
2199
2200         if (bp->link_vars.link_up) {
2201                 u16 line_speed;
2202
2203                 if (bp->state == BNX2X_STATE_OPEN)
2204                         netif_carrier_on(bp->dev);
2205                 netdev_info(bp->dev, "NIC Link is Up, ");
2206
2207                 line_speed = bp->link_vars.line_speed;
2208                 if (IS_E1HMF(bp)) {
2209                         u16 vn_max_rate;
2210
2211                         vn_max_rate =
2212                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2213                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2214                         if (vn_max_rate < line_speed)
2215                                 line_speed = vn_max_rate;
2216                 }
2217                 pr_cont("%d Mbps ", line_speed);
2218
2219                 if (bp->link_vars.duplex == DUPLEX_FULL)
2220                         pr_cont("full duplex");
2221                 else
2222                         pr_cont("half duplex");
2223
2224                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2225                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2226                                 pr_cont(", receive ");
2227                                 if (bp->link_vars.flow_ctrl &
2228                                     BNX2X_FLOW_CTRL_TX)
2229                                         pr_cont("& transmit ");
2230                         } else {
2231                                 pr_cont(", transmit ");
2232                         }
2233                         pr_cont("flow control ON");
2234                 }
2235                 pr_cont("\n");
2236
2237         } else { /* link_down */
2238                 netif_carrier_off(bp->dev);
2239                 netdev_err(bp->dev, "NIC Link is Down\n");
2240         }
2241 }
2242
2243 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2244 {
2245         if (!BP_NOMCP(bp)) {
2246                 u8 rc;
2247
2248                 /* Initialize link parameters structure variables */
2249                 /* It is recommended to turn off RX FC for jumbo frames
2250                    for better performance */
2251                 if (bp->dev->mtu > 5000)
2252                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2253                 else
2254                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2255
2256                 bnx2x_acquire_phy_lock(bp);
2257
2258                 if (load_mode == LOAD_DIAG)
2259                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2260
2261                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2262
2263                 bnx2x_release_phy_lock(bp);
2264
2265                 bnx2x_calc_fc_adv(bp);
2266
2267                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2268                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2269                         bnx2x_link_report(bp);
2270                 }
2271
2272                 return rc;
2273         }
2274         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2275         return -EINVAL;
2276 }
2277
2278 static void bnx2x_link_set(struct bnx2x *bp)
2279 {
2280         if (!BP_NOMCP(bp)) {
2281                 bnx2x_acquire_phy_lock(bp);
2282                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2283                 bnx2x_release_phy_lock(bp);
2284
2285                 bnx2x_calc_fc_adv(bp);
2286         } else
2287                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2288 }
2289
2290 static void bnx2x__link_reset(struct bnx2x *bp)
2291 {
2292         if (!BP_NOMCP(bp)) {
2293                 bnx2x_acquire_phy_lock(bp);
2294                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2295                 bnx2x_release_phy_lock(bp);
2296         } else
2297                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2298 }
2299
2300 static u8 bnx2x_link_test(struct bnx2x *bp)
2301 {
2302         u8 rc = 0;
2303
2304         if (!BP_NOMCP(bp)) {
2305                 bnx2x_acquire_phy_lock(bp);
2306                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2307                 bnx2x_release_phy_lock(bp);
2308         } else
2309                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2310
2311         return rc;
2312 }
2313
2314 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2315 {
2316         u32 r_param = bp->link_vars.line_speed / 8;
2317         u32 fair_periodic_timeout_usec;
2318         u32 t_fair;
2319
2320         memset(&(bp->cmng.rs_vars), 0,
2321                sizeof(struct rate_shaping_vars_per_port));
2322         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2323
2324         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2325         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2326
2327         /* this is the threshold below which no timer arming will occur.
2328            The 1.25 coefficient makes the threshold a little bigger than
2329            the real time, to compensate for timer inaccuracy */
2330         bp->cmng.rs_vars.rs_threshold =
2331                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2332
2333         /* resolution of fairness timer */
2334         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2335         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2336         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2337
2338         /* this is the threshold below which we won't arm the timer anymore */
2339         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2340
2341         /* we multiply by 1e3/8 to get bytes/msec.
2342            We don't want the credits to exceed
2343            t_fair*FAIR_MEM (the algorithm's resolution) */
2344         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2345         /* since each tick is 4 usec */
2346         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2347 }
2348
2349 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2350    It's needed for further normalizing of the min_rates.
2351    The stored result is:
2352      the sum of vn_min_rates,
2353        or
2354      0 - if all the min_rates are 0.
2355      In the latter case the fairness algorithm should be deactivated.
2356      If not all min_rates are zero then those that are zeroes will be set to 1.
2357  */
2358 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2359 {
2360         int all_zero = 1;
2361         int port = BP_PORT(bp);
2362         int vn;
2363
2364         bp->vn_weight_sum = 0;
2365         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2366                 int func = 2*vn + port;
2367                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2368                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2370
2371                 /* Skip hidden vns */
2372                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2373                         continue;
2374
2375                 /* If min rate is zero - set it to 1 */
2376                 if (!vn_min_rate)
2377                         vn_min_rate = DEF_MIN_RATE;
2378                 else
2379                         all_zero = 0;
2380
2381                 bp->vn_weight_sum += vn_min_rate;
2382         }
2383
2384         /* ... only if all min rates are zeros - disable fairness */
2385         if (all_zero) {
2386                 bp->cmng.flags.cmng_enables &=
2387                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2388                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2389                    " fairness will be disabled\n");
2390         } else
2391                 bp->cmng.flags.cmng_enables |=
2392                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393 }
2394
2395 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2396 {
2397         struct rate_shaping_vars_per_vn m_rs_vn;
2398         struct fairness_vars_per_vn m_fair_vn;
2399         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2400         u16 vn_min_rate, vn_max_rate;
2401         int i;
2402
2403         /* If function is hidden - set min and max to zeroes */
2404         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2405                 vn_min_rate = 0;
2406                 vn_max_rate = 0;
2407
2408         } else {
2409                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2410                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2411                 /* If min rate is zero - set it to 1 */
2412                 if (!vn_min_rate)
2413                         vn_min_rate = DEF_MIN_RATE;
2414                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2415                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2416         }
2417         DP(NETIF_MSG_IFUP,
2418            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2419            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2420
2421         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2422         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2423
2424         /* global vn counter - maximal Mbps for this vn */
2425         m_rs_vn.vn_counter.rate = vn_max_rate;
2426
2427         /* quota - number of bytes transmitted in this period */
2428         m_rs_vn.vn_counter.quota =
2429                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2430
2431         if (bp->vn_weight_sum) {
2432                 /* credit for each period of the fairness algorithm:
2433                    number of bytes in T_FAIR (the vns share the port rate).
2434                    vn_weight_sum should not be larger than 10000, thus
2435                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2436                    than zero */
2437                 m_fair_vn.vn_credit_delta =
2438                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2439                                                    (8 * bp->vn_weight_sum))),
2440                               (bp->cmng.fair_vars.fair_threshold * 2));
2441                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2442                    m_fair_vn.vn_credit_delta);
2443         }
2444
2445         /* Store it to internal memory */
2446         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2447                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2448                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2449                        ((u32 *)(&m_rs_vn))[i]);
2450
2451         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2452                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2454                        ((u32 *)(&m_fair_vn))[i]);
2455 }
2456
2457
2458 /* This function is called upon link interrupt */
2459 static void bnx2x_link_attn(struct bnx2x *bp)
2460 {
2461         /* Make sure that we are synced with the current statistics */
2462         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463
2464         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2465
2466         if (bp->link_vars.link_up) {
2467
2468                 /* dropless flow control */
2469                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2470                         int port = BP_PORT(bp);
2471                         u32 pause_enabled = 0;
2472
2473                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2474                                 pause_enabled = 1;
2475
2476                         REG_WR(bp, BAR_USTRORM_INTMEM +
2477                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2478                                pause_enabled);
2479                 }
2480
2481                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2482                         struct host_port_stats *pstats;
2483
2484                         pstats = bnx2x_sp(bp, port_stats);
2485                         /* reset old bmac stats */
2486                         memset(&(pstats->mac_stx[0]), 0,
2487                                sizeof(struct mac_stx));
2488                 }
2489                 if (bp->state == BNX2X_STATE_OPEN)
2490                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491         }
2492
2493         /* indicate link status */
2494         bnx2x_link_report(bp);
2495
2496         if (IS_E1HMF(bp)) {
2497                 int port = BP_PORT(bp);
2498                 int func;
2499                 int vn;
2500
2501                 /* Set the attention towards other drivers on the same port */
2502                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2503                         if (vn == BP_E1HVN(bp))
2504                                 continue;
2505
2506                         func = ((vn << 1) | port);
2507                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2508                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2509                 }
2510
2511                 if (bp->link_vars.link_up) {
2512                         int i;
2513
2514                         /* Init rate shaping and fairness contexts */
2515                         bnx2x_init_port_minmax(bp);
2516
2517                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2518                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2519
2520                         /* Store it to internal memory */
2521                         for (i = 0;
2522                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2523                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2524                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2525                                        ((u32 *)(&bp->cmng))[i]);
2526                 }
2527         }
2528 }
2529
2530 static void bnx2x__link_status_update(struct bnx2x *bp)
2531 {
2532         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2533                 return;
2534
2535         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2536
2537         if (bp->link_vars.link_up)
2538                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2539         else
2540                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2541
2542         bnx2x_calc_vn_weight_sum(bp);
2543
2544         /* indicate link status */
2545         bnx2x_link_report(bp);
2546 }
2547
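/* This driver instance takes over as PMF (Port Management Function)
   for its port: enable NIG attention for this vn and kick the
   statistics state machine */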
2548 static void bnx2x_pmf_update(struct bnx2x *bp)
2549 {
2550         int port = BP_PORT(bp);
2551         u32 val;
2552
2553         bp->port.pmf = 1;
2554         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2555
2556         /* enable nig attention */
2557         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2558         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2559         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2560
2561         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2562 }
2563
2564 /* end of Link */
2565
2566 /* slow path */
2567
2568 /*
2569  * General service functions
2570  */
2571
2572 /* send the MCP a request, block until there is a reply */
2573 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2574 {
2575         int func = BP_FUNC(bp);
2576         u32 seq = ++bp->fw_seq;
2577         u32 rc = 0;
2578         u32 cnt = 1;
2579         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2580
2581         mutex_lock(&bp->fw_mb_mutex);
2582         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2583         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2584
2585         do {
2586                 /* let the FW do its magic ... */
2587                 msleep(delay);
2588
2589                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2590
2591                 /* Give the FW up to 5 seconds (500*10ms) */
2592         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2593
2594         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2595            cnt*delay, rc, seq);
2596
2597         /* is this a reply to our command? */
2598         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2599                 rc &= FW_MSG_CODE_MASK;
2600         else {
2601                 /* FW BUG! */
2602                 BNX2X_ERR("FW failed to respond!\n");
2603                 bnx2x_fw_dump(bp);
2604                 rc = 0;
2605         }
2606         mutex_unlock(&bp->fw_mb_mutex);
2607
2608         return rc;
2609 }
2610
2611 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2612 static void bnx2x_set_rx_mode(struct net_device *dev);
2613
2614 static void bnx2x_e1h_disable(struct bnx2x *bp)
2615 {
2616         int port = BP_PORT(bp);
2617
2618         netif_tx_disable(bp->dev);
2619
2620         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2622         netif_carrier_off(bp->dev);
2623 }
2624
2625 static void bnx2x_e1h_enable(struct bnx2x *bp)
2626 {
2627         int port = BP_PORT(bp);
2628
2629         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2631         /* Only the Tx queues need to be re-enabled */
2632         netif_tx_wake_all_queues(bp->dev);
2633
2634         /*
2635          * Do not call netif_carrier_on here; the link state check will
2636          * call it if the link is up
2637          */
2638 }
2639
2640 static void bnx2x_update_min_max(struct bnx2x *bp)
2641 {
2642         int port = BP_PORT(bp);
2643         int vn, i;
2644
2645         /* Init rate shaping and fairness contexts */
2646         bnx2x_init_port_minmax(bp);
2647
2648         bnx2x_calc_vn_weight_sum(bp);
2649
2650         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2651                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2652
2653         if (bp->port.pmf) {
2654                 int func;
2655
2656                 /* Set the attention towards other drivers on the same port */
2657                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2658                         if (vn == BP_E1HVN(bp))
2659                                 continue;
2660
2661                         func = ((vn << 1) | port);
2662                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2663                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2664                 }
2665
2666                 /* Store it to internal memory */
2667                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2668                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2669                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2670                                ((u32 *)(&bp->cmng))[i]);
2671         }
2672 }
2673
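/* Handle a DCC event signalled by the MCP: enable/disable this
   function and/or refresh the bandwidth min/max settings, then
   report the outcome back to the MCP */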
2674 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2675 {
2676         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2677
2678         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2679
2680                 /*
2681                  * This is the only place besides the function initialization
2682                  * where the bp->flags can change so it is done without any
2683                  * locks
2684                  */
2685                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2686                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2687                         bp->flags |= MF_FUNC_DIS;
2688
2689                         bnx2x_e1h_disable(bp);
2690                 } else {
2691                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2692                         bp->flags &= ~MF_FUNC_DIS;
2693
2694                         bnx2x_e1h_enable(bp);
2695                 }
2696                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2697         }
2698         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2699
2700                 bnx2x_update_min_max(bp);
2701                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2702         }
2703
2704         /* Report results to MCP */
2705         if (dcc_event)
2706                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2707         else
2708                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2709 }
2710
2711 /* must be called under the spq lock */
2712 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2713 {
2714         struct eth_spe *next_spe = bp->spq_prod_bd;
2715
2716         if (bp->spq_prod_bd == bp->spq_last_bd) {
2717                 bp->spq_prod_bd = bp->spq;
2718                 bp->spq_prod_idx = 0;
2719                 DP(NETIF_MSG_TIMER, "end of spq\n");
2720         } else {
2721                 bp->spq_prod_bd++;
2722                 bp->spq_prod_idx++;
2723         }
2724         return next_spe;
2725 }
2726
2727 /* must be called under the spq lock */
2728 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2729 {
2730         int func = BP_FUNC(bp);
2731
2732         /* Make sure that BD data is updated before writing the producer */
2733         wmb();
2734
2735         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2736                bp->spq_prod_idx);
2737         mmiowb();
2738 }
2739
2740 /* the slow path queue is odd since completions arrive on the fastpath ring */
2741 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2742                          u32 data_hi, u32 data_lo, int common)
2743 {
2744         struct eth_spe *spe;
2745
2746 #ifdef BNX2X_STOP_ON_ERROR
2747         if (unlikely(bp->panic))
2748                 return -EIO;
2749 #endif
2750
2751         spin_lock_bh(&bp->spq_lock);
2752
2753         if (!bp->spq_left) {
2754                 BNX2X_ERR("BUG! SPQ ring full!\n");
2755                 spin_unlock_bh(&bp->spq_lock);
2756                 bnx2x_panic();
2757                 return -EBUSY;
2758         }
2759
2760         spe = bnx2x_sp_get_next(bp);
2761
2762         /* The CID needs the port number to be encoded in it */
2763         spe->hdr.conn_and_cmd_data =
2764                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2765                                     HW_CID(bp, cid));
2766         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
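        /* "common" ramrods are not tied to a single connection and are
           flagged as such in the SPE header type field */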
2767         if (common)
2768                 spe->hdr.type |=
2769                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2770
2771         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2772         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2773
2774         bp->spq_left--;
2775
2776         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2777            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2778            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2779            (u32)(U64_LO(bp->spq_mapping) +
2780            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2781            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2782
2783         bnx2x_sp_prod_update(bp);
2784         spin_unlock_bh(&bp->spq_lock);
2785         return 0;
2786 }
2787
2788 /* acquire split MCP access lock register */
2789 static int bnx2x_acquire_alr(struct bnx2x *bp)
2790 {
2791         u32 j, val;
2792         int rc = 0;
2793
2794         might_sleep();
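        /* Request the lock by writing bit 31 to the MCP's split-access
           lock register, then poll the read-back until the bit shows
           as set */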
2795         for (j = 0; j < 1000; j++) {
2796                 val = (1UL << 31);
2797                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799                 if (val & (1L << 31))
2800                         break;
2801
2802                 msleep(5);
2803         }
2804         if (!(val & (1L << 31))) {
2805                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2806                 rc = -EBUSY;
2807         }
2808
2809         return rc;
2810 }
2811
2812 /* release split MCP access lock register */
2813 static void bnx2x_release_alr(struct bnx2x *bp)
2814 {
2815         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2816 }
2817
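/*
 * Compare the cached default status block indices against the ones
 * just written by the chip; the returned bitmask flags which of the
 * attn/c/u/x/t indices changed and therefore need servicing.
 */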
2818 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2819 {
2820         struct host_def_status_block *def_sb = bp->def_status_blk;
2821         u16 rc = 0;
2822
2823         barrier(); /* status block is written to by the chip */
2824         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2825                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2826                 rc |= 1;
2827         }
2828         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2829                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2830                 rc |= 2;
2831         }
2832         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2833                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2834                 rc |= 4;
2835         }
2836         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2837                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2838                 rc |= 8;
2839         }
2840         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2841                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2842                 rc |= 16;
2843         }
2844         return rc;
2845 }
2846
2847 /*
2848  * slow path service functions
2849  */
2850
2851 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2852 {
2853         int port = BP_PORT(bp);
2854         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2855                        COMMAND_REG_ATTN_BITS_SET);
2856         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2857                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2858         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2859                                        NIG_REG_MASK_INTERRUPT_PORT0;
2860         u32 aeu_mask;
2861         u32 nig_mask = 0;
2862
2863         if (bp->attn_state & asserted)
2864                 BNX2X_ERR("IGU ERROR\n");
2865
2866         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2867         aeu_mask = REG_RD(bp, aeu_addr);
2868
2869         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2870            aeu_mask, asserted);
2871         aeu_mask &= ~(asserted & 0x3ff);
2872         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2873
2874         REG_WR(bp, aeu_addr, aeu_mask);
2875         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2876
2877         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2878         bp->attn_state |= asserted;
2879         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2880
2881         if (asserted & ATTN_HARD_WIRED_MASK) {
2882                 if (asserted & ATTN_NIG_FOR_FUNC) {
2883
2884                         bnx2x_acquire_phy_lock(bp);
2885
2886                         /* save nig interrupt mask */
2887                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2888                         REG_WR(bp, nig_int_mask_addr, 0);
2889
2890                         bnx2x_link_attn(bp);
2891
2892                         /* handle unicore attn? */
2893                 }
2894                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2895                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2896
2897                 if (asserted & GPIO_2_FUNC)
2898                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2899
2900                 if (asserted & GPIO_3_FUNC)
2901                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2902
2903                 if (asserted & GPIO_4_FUNC)
2904                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2905
2906                 if (port == 0) {
2907                         if (asserted & ATTN_GENERAL_ATTN_1) {
2908                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2909                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2910                         }
2911                         if (asserted & ATTN_GENERAL_ATTN_2) {
2912                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2913                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2914                         }
2915                         if (asserted & ATTN_GENERAL_ATTN_3) {
2916                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2917                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2918                         }
2919                 } else {
2920                         if (asserted & ATTN_GENERAL_ATTN_4) {
2921                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2922                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2923                         }
2924                         if (asserted & ATTN_GENERAL_ATTN_5) {
2925                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2926                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2927                         }
2928                         if (asserted & ATTN_GENERAL_ATTN_6) {
2929                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2930                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2931                         }
2932                 }
2933
2934         } /* if hardwired */
2935
2936         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2937            asserted, hc_addr);
2938         REG_WR(bp, hc_addr, asserted);
2939
2940         /* now set back the mask */
2941         if (asserted & ATTN_NIG_FOR_FUNC) {
2942                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2943                 bnx2x_release_phy_lock(bp);
2944         }
2945 }
2946
2947 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2948 {
2949         int port = BP_PORT(bp);
2950
2951         /* mark the failure */
2952         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2953         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2954         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2955                  bp->link_params.ext_phy_config);
2956
2957         /* log the failure */
2958         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2959                " the driver to shutdown the card to prevent permanent"
2960                " damage.  Please contact OEM Support for assistance\n");
2961 }
2962
2963 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2964 {
2965         int port = BP_PORT(bp);
2966         int reg_offset;
2967         u32 val, swap_val, swap_override;
2968
2969         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2970                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2971
2972         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2973
2974                 val = REG_RD(bp, reg_offset);
2975                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2976                 REG_WR(bp, reg_offset, val);
2977
2978                 BNX2X_ERR("SPIO5 hw attention\n");
2979
2980                 /* Fan failure attention */
2981                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2982                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2983                         /* Low power mode is controlled by GPIO 2 */
2984                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2985                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2986                         /* The PHY reset is controlled by GPIO 1 */
2987                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2989                         break;
2990
2991                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2992                         /* The PHY reset is controlled by GPIO 1 */
2993                         /* fake the port number to cancel the swap done in
2994                            set_gpio() */
2995                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2996                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2997                         port = (swap_val && swap_override) ^ 1;
2998                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2999                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3000                         break;
3001
3002                 default:
3003                         break;
3004                 }
3005                 bnx2x_fan_failure(bp);
3006         }
3007
3008         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010                 bnx2x_acquire_phy_lock(bp);
3011                 bnx2x_handle_module_detect_int(&bp->link_params);
3012                 bnx2x_release_phy_lock(bp);
3013         }
3014
3015         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3016
3017                 val = REG_RD(bp, reg_offset);
3018                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019                 REG_WR(bp, reg_offset, val);
3020
3021                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3022                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3023                 bnx2x_panic();
3024         }
3025 }
3026
3027 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3028 {
3029         u32 val;
3030
3031         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3032
3033                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035                 /* DORQ discard attention */
3036                 if (val & 0x2)
3037                         BNX2X_ERR("FATAL error from DORQ\n");
3038         }
3039
3040         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3041
3042                 int port = BP_PORT(bp);
3043                 int reg_offset;
3044
3045                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3047
3048                 val = REG_RD(bp, reg_offset);
3049                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050                 REG_WR(bp, reg_offset, val);
3051
3052                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3053                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3054                 bnx2x_panic();
3055         }
3056 }
3057
3058 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3059 {
3060         u32 val;
3061
3062         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3063
3064                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066                 /* CFC error attention */
3067                 if (val & 0x2)
3068                         BNX2X_ERR("FATAL error from CFC\n");
3069         }
3070
3071         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3072
3073                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075                 /* RQ_USDMDP_FIFO_OVERFLOW */
3076                 if (val & 0x18000)
3077                         BNX2X_ERR("FATAL error from PXP\n");
3078         }
3079
3080         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3081
3082                 int port = BP_PORT(bp);
3083                 int reg_offset;
3084
3085                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3086                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3087
3088                 val = REG_RD(bp, reg_offset);
3089                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3090                 REG_WR(bp, reg_offset, val);
3091
3092                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3093                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3094                 bnx2x_panic();
3095         }
3096 }
3097
3098 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3099 {
3100         u32 val;
3101
3102         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3103
3104                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3105                         int func = BP_FUNC(bp);
3106
3107                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3108                         bp->mf_config = SHMEM_RD(bp,
3109                                            mf_cfg.func_mf_config[func].config);
3110                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3111                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3112                                 bnx2x_dcc_event(bp,
3113                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3114                         bnx2x__link_status_update(bp);
3115                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3116                                 bnx2x_pmf_update(bp);
3117
3118                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3119
3120                         BNX2X_ERR("MC assert!\n");
3121                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3122                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3123                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3124                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3125                         bnx2x_panic();
3126
3127                 } else if (attn & BNX2X_MCP_ASSERT) {
3128
3129                         BNX2X_ERR("MCP assert!\n");
3130                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3131                         bnx2x_fw_dump(bp);
3132
3133                 } else
3134                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3135         }
3136
3137         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3138                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3139                 if (attn & BNX2X_GRC_TIMEOUT) {
3140                         val = CHIP_IS_E1H(bp) ?
3141                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3142                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3143                 }
3144                 if (attn & BNX2X_GRC_RSV) {
3145                         val = CHIP_IS_E1H(bp) ?
3146                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3147                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3148                 }
3149                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3150         }
3151 }
3152
3153 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3154 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3155
3156
3157 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3158 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3159 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3160 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3161 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3162 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3163 /*
3164  * should be run under rtnl lock
3165  */
3166 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3167 {
3168         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3169         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3170         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3171         barrier();
3172         mmiowb();
3173 }
3174
3175 /*
3176  * should be run under rtnl lock
3177  */
3178 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3179 {
3180         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3181         val |= (1 << RESET_DONE_FLAG_SHIFT);
3182         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3183         barrier();
3184         mmiowb();
3185 }
3186
3187 /*
3188  * should be run under rtnl lock
3189  */
3190 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3191 {
3192         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3193         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3194         return !(val & RESET_DONE_FLAG_MASK);
3195 }
3196
3197 /*
3198  * should be run under rtnl lock
3199  */
3200 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3201 {
3202         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3203
3204         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3205
3206         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3207         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3208         barrier();
3209         mmiowb();
3210 }
3211
3212 /*
3213  * should be run under rtnl lock
3214  */
3215 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3216 {
3217         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3218
3219         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3220
3221         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3222         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3223         barrier();
3224         mmiowb();
3225
3226         return val1;
3227 }
3228
3229 /*
3230  * should be run under rtnl lock
3231  */
3232 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3233 {
3234         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3235 }
3236
3237 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3238 {
3239         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3240         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3241 }
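
/*
 * Editor's note: BNX2X_MISC_GEN_REG packs a 16-bit per-chip load counter
 * in bits 15:0 and the "reset in progress" flag at bit
 * RESET_DONE_FLAG_SHIFT (16).  A hedged, compiled-out sketch of the
 * intended pairing (both calls made under rtnl; the load/unload context
 * is assumed):
 */
#if 0
        bnx2x_inc_load_cnt(bp);         /* when bringing the nic up */

        if (!bnx2x_dec_load_cnt(bp))    /* when taking the nic down */
                ;                       /* last user gone: global reset is safe */
#endif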
3242
3243 static inline void _print_next_block(int idx, const char *blk)
3244 {
3245         if (idx)
3246                 pr_cont(", ");
3247         pr_cont("%s", blk);
3248 }
3249
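/*
 * Editor's note: par_num threads through all four parity printers below,
 * so _print_next_block() emits its ", " separator correctly across the
 * single combined list started in bnx2x_parity_attn().
 */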
3250 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3251 {
3252         int i = 0;
3253         u32 cur_bit = 0;
3254         for (i = 0; sig; i++) {
3255                 cur_bit = ((u32)0x1 << i);
3256                 if (sig & cur_bit) {
3257                         switch (cur_bit) {
3258                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3259                                 _print_next_block(par_num++, "BRB");
3260                                 break;
3261                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3262                                 _print_next_block(par_num++, "PARSER");
3263                                 break;
3264                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3265                                 _print_next_block(par_num++, "TSDM");
3266                                 break;
3267                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3268                                 _print_next_block(par_num++, "SEARCHER");
3269                                 break;
3270                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3271                                 _print_next_block(par_num++, "TSEMI");
3272                                 break;
3273                         }
3274
3275                         /* Clear the bit */
3276                         sig &= ~cur_bit;
3277                 }
3278         }
3279
3280         return par_num;
3281 }
3282
3283 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3284 {
3285         int i = 0;
3286         u32 cur_bit = 0;
3287         for (i = 0; sig; i++) {
3288                 cur_bit = ((u32)0x1 << i);
3289                 if (sig & cur_bit) {
3290                         switch (cur_bit) {
3291                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3292                                 _print_next_block(par_num++, "PBCLIENT");
3293                                 break;
3294                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3295                                 _print_next_block(par_num++, "QM");
3296                                 break;
3297                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3298                                 _print_next_block(par_num++, "XSDM");
3299                                 break;
3300                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3301                                 _print_next_block(par_num++, "XSEMI");
3302                                 break;
3303                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3304                                 _print_next_block(par_num++, "DOORBELLQ");
3305                                 break;
3306                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3307                                 _print_next_block(par_num++, "VAUX PCI CORE");
3308                                 break;
3309                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3310                                 _print_next_block(par_num++, "DEBUG");
3311                                 break;
3312                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3313                                 _print_next_block(par_num++, "USDM");
3314                                 break;
3315                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3316                                 _print_next_block(par_num++, "USEMI");
3317                                 break;
3318                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3319                                 _print_next_block(par_num++, "UPB");
3320                                 break;
3321                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3322                                 _print_next_block(par_num++, "CSDM");
3323                                 break;
3324                         }
3325
3326                         /* Clear the bit */
3327                         sig &= ~cur_bit;
3328                 }
3329         }
3330
3331         return par_num;
3332 }
3333
3334 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3335 {
3336         int i = 0;
3337         u32 cur_bit = 0;
3338         for (i = 0; sig; i++) {
3339                 cur_bit = ((u32)0x1 << i);
3340                 if (sig & cur_bit) {
3341                         switch (cur_bit) {
3342                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3343                                 _print_next_block(par_num++, "CSEMI");
3344                                 break;
3345                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3346                                 _print_next_block(par_num++, "PXP");
3347                                 break;
3348                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3349                                 _print_next_block(par_num++,
3350                                         "PXPPCICLOCKCLIENT");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "CFC");
3354                                 break;
3355                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3356                                 _print_next_block(par_num++, "CDU");
3357                                 break;
3358                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3359                                 _print_next_block(par_num++, "IGU");
3360                                 break;
3361                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3362                                 _print_next_block(par_num++, "MISC");
3363                                 break;
3364                         }
3365
3366                         /* Clear the bit */
3367                         sig &= ~cur_bit;
3368                 }
3369         }
3370
3371         return par_num;
3372 }
3373
3374 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3375 {
3376         int i = 0;
3377         u32 cur_bit = 0;
3378         for (i = 0; sig; i++) {
3379                 cur_bit = ((u32)0x1 << i);
3380                 if (sig & cur_bit) {
3381                         switch (cur_bit) {
3382                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3383                                 _print_next_block(par_num++, "MCP ROM");
3384                                 break;
3385                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3386                                 _print_next_block(par_num++, "MCP UMP RX");
3387                                 break;
3388                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3389                                 _print_next_block(par_num++, "MCP UMP TX");
3390                                 break;
3391                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3392                                 _print_next_block(par_num++, "MCP SCPAD");
3393                                 break;
3394                         }
3395
3396                         /* Clear the bit */
3397                         sig &= ~cur_bit;
3398                 }
3399         }
3400
3401         return par_num;
3402 }
3403
3404 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3405                                      u32 sig2, u32 sig3)
3406 {
3407         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3408             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3409                 int par_num = 0;
3410                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3411                         "[0]:0x%08x [1]:0x%08x "
3412                         "[2]:0x%08x [3]:0x%08x\n",
3413                           sig0 & HW_PRTY_ASSERT_SET_0,
3414                           sig1 & HW_PRTY_ASSERT_SET_1,
3415                           sig2 & HW_PRTY_ASSERT_SET_2,
3416                           sig3 & HW_PRTY_ASSERT_SET_3);
3417                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3418                        bp->dev->name);
3419                 par_num = bnx2x_print_blocks_with_parity0(
3420                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3421                 par_num = bnx2x_print_blocks_with_parity1(
3422                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3423                 par_num = bnx2x_print_blocks_with_parity2(
3424                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3425                 par_num = bnx2x_print_blocks_with_parity3(
3426                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3427                 pr_cont("\n");
3428                 return true;
3429         } else
3430                 return false;
3431 }
3432
3433 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3434 {
3435         struct attn_route attn;
3436         int port = BP_PORT(bp);
3437
3438         attn.sig[0] = REG_RD(bp,
3439                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3440                              port*4);
3441         attn.sig[1] = REG_RD(bp,
3442                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3443                              port*4);
3444         attn.sig[2] = REG_RD(bp,
3445                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3446                              port*4);
3447         attn.sig[3] = REG_RD(bp,
3448                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3449                              port*4);
3450
3451         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3452                                         attn.sig[3]);
3453 }
3454
3455 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3456 {
3457         struct attn_route attn, *group_mask;
3458         int port = BP_PORT(bp);
3459         int index;
3460         u32 reg_addr;
3461         u32 val;
3462         u32 aeu_mask;
3463
3464         /* need to take the HW lock because the MCP or the other port
3465            might also try to handle this event */
3466         bnx2x_acquire_alr(bp);
3467
3468         if (bnx2x_chk_parity_attn(bp)) {
3469                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3470                 bnx2x_set_reset_in_progress(bp);
3471                 schedule_delayed_work(&bp->reset_task, 0);
3472                 /* Disable HW interrupts */
3473                 bnx2x_int_disable(bp);
3474                 bnx2x_release_alr(bp);
3475                 /* In case of parity errors don't handle attentions so that
3476                  * the other function would also "see" the parity errors.
3477                  */
3478                 return;
3479         }
3480
3481         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3482         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3483         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3484         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3485         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3486            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3487
3488         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3489                 if (deasserted & (1 << index)) {
3490                         group_mask = &bp->attn_group[index];
3491
3492                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3493                            index, group_mask->sig[0], group_mask->sig[1],
3494                            group_mask->sig[2], group_mask->sig[3]);
3495
3496                         bnx2x_attn_int_deasserted3(bp,
3497                                         attn.sig[3] & group_mask->sig[3]);
3498                         bnx2x_attn_int_deasserted1(bp,
3499                                         attn.sig[1] & group_mask->sig[1]);
3500                         bnx2x_attn_int_deasserted2(bp,
3501                                         attn.sig[2] & group_mask->sig[2]);
3502                         bnx2x_attn_int_deasserted0(bp,
3503                                         attn.sig[0] & group_mask->sig[0]);
3504                 }
3505         }
3506
3507         bnx2x_release_alr(bp);
3508
3509         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3510
3511         val = ~deasserted;
3512         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3513            val, reg_addr);
3514         REG_WR(bp, reg_addr, val);
3515
3516         if (~bp->attn_state & deasserted)
3517                 BNX2X_ERR("IGU ERROR\n");
3518
3519         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3520                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3521
3522         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3523         aeu_mask = REG_RD(bp, reg_addr);
3524
3525         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3526            aeu_mask, deasserted);
3527         aeu_mask |= (deasserted & 0x3ff);
3528         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3529
3530         REG_WR(bp, reg_addr, aeu_mask);
3531         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3532
3533         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3534         bp->attn_state &= ~deasserted;
3535         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3536 }
3537
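/*
 * Editor's note: a bit is newly asserted when the chip reports it
 * (attn_bits) while it is neither acked (attn_ack) nor recorded in the
 * soft attn_state, and newly deasserted when the chip has dropped it
 * while both the ack and the state bit are still held.  The sanity check
 * below flags bits where hardware and ack agree but the soft state
 * disagrees.
 */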
3538 static void bnx2x_attn_int(struct bnx2x *bp)
3539 {
3540         /* read local copy of bits */
3541         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3542                                                                 attn_bits);
3543         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3544                                                                 attn_bits_ack);
3545         u32 attn_state = bp->attn_state;
3546
3547         /* look for changed bits */
3548         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3549         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3550
3551         DP(NETIF_MSG_HW,
3552            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3553            attn_bits, attn_ack, asserted, deasserted);
3554
3555         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3556                 BNX2X_ERR("BAD attention state\n");
3557
3558         /* handle bits that were raised */
3559         if (asserted)
3560                 bnx2x_attn_int_asserted(bp, asserted);
3561
3562         if (deasserted)
3563                 bnx2x_attn_int_deasserted(bp, deasserted);
3564 }
3565
3566 static void bnx2x_sp_task(struct work_struct *work)
3567 {
3568         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3569         u16 status;
3570
3571         /* Return here if interrupt is disabled */
3572         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3573                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3574                 return;
3575         }
3576
3577         status = bnx2x_update_dsb_idx(bp);
3578 /*      if (status == 0)                                     */
3579 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3580
3581         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3582
3583         /* HW attentions */
3584         if (status & 0x1) {
3585                 bnx2x_attn_int(bp);
3586                 status &= ~0x1;
3587         }
3588
3589         /* CStorm events: STAT_QUERY */
3590         if (status & 0x2) {
3591                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3592                 status &= ~0x2;
3593         }
3594
3595         if (unlikely(status))
3596                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3597                    status);
3598
3599         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3600                      IGU_INT_NOP, 1);
3601         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3602                      IGU_INT_NOP, 1);
3603         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3604                      IGU_INT_NOP, 1);
3605         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3606                      IGU_INT_NOP, 1);
3607         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3608                      IGU_INT_ENABLE, 1);
3609 }
3610
3611 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3612 {
3613         struct net_device *dev = dev_instance;
3614         struct bnx2x *bp = netdev_priv(dev);
3615
3616         /* Return here if interrupt is disabled */
3617         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3618                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3619                 return IRQ_HANDLED;
3620         }
3621
3622         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3623
3624 #ifdef BNX2X_STOP_ON_ERROR
3625         if (unlikely(bp->panic))
3626                 return IRQ_HANDLED;
3627 #endif
3628
3629 #ifdef BCM_CNIC
3630         {
3631                 struct cnic_ops *c_ops;
3632
3633                 rcu_read_lock();
3634                 c_ops = rcu_dereference(bp->cnic_ops);
3635                 if (c_ops)
3636                         c_ops->cnic_handler(bp->cnic_data, NULL);
3637                 rcu_read_unlock();
3638         }
3639 #endif
3640         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3641
3642         return IRQ_HANDLED;
3643 }
3644
3645 /* end of slow path */
3646
3647 /* Statistics */
3648
3649 /****************************************************************************
3650 * Macros
3651 ****************************************************************************/
3652
3653 /* sum[hi:lo] += add[hi:lo] */
3654 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3655         do { \
3656                 s_lo += a_lo; \
3657                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3658         } while (0)
3659
3660 /* difference = minuend - subtrahend */
3661 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3662         do { \
3663                 if (m_lo < s_lo) { \
3664                         /* underflow */ \
3665                         d_hi = m_hi - s_hi; \
3666                         if (d_hi > 0) { \
3667                                 /* we can 'loan' 1 */ \
3668                                 d_hi--; \
3669                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3670                         } else { \
3671                                 /* m_hi <= s_hi */ \
3672                                 d_hi = 0; \
3673                                 d_lo = 0; \
3674                         } \
3675                 } else { \
3676                         /* m_lo >= s_lo */ \
3677                         if (m_hi < s_hi) { \
3678                                 d_hi = 0; \
3679                                 d_lo = 0; \
3680                         } else { \
3681                                 /* m_hi >= s_hi */ \
3682                                 d_hi = m_hi - s_hi; \
3683                                 d_lo = m_lo - s_lo; \
3684                         } \
3685                 } \
3686         } while (0)
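
/*
 * Editor's note: a worked example of the borrow path in DIFF_64.  For
 * minuend 0x00000001_00000000 and subtrahend 0x00000000_00000001:
 * m_lo (0) < s_lo (1) and d_hi = 1 - 0 > 0, so one is loaned from d_hi
 * (leaving 0) and d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff, i.e. the
 * correct 64-bit difference 0x00000000_ffffffff.
 */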
3687
3688 #define UPDATE_STAT64(s, t) \
3689         do { \
3690                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3691                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3692                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3693                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3694                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3695                        pstats->mac_stx[1].t##_lo, diff.lo); \
3696         } while (0)
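
/*
 * Editor's note: in UPDATE_STAT64, mac_stx[0] caches the latest raw MAC
 * snapshot so the next delta can be computed against it, while mac_stx[1]
 * accumulates the running 64-bit totals via ADD_64.
 */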
3697
3698 #define UPDATE_STAT64_NIG(s, t) \
3699         do { \
3700                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3701                         diff.lo, new->s##_lo, old->s##_lo); \
3702                 ADD_64(estats->t##_hi, diff.hi, \
3703                        estats->t##_lo, diff.lo); \
3704         } while (0)
3705
3706 /* sum[hi:lo] += add */
3707 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3708         do { \
3709                 s_lo += a; \
3710                 s_hi += (s_lo < a) ? 1 : 0; \
3711         } while (0)
3712
3713 #define UPDATE_EXTEND_STAT(s) \
3714         do { \
3715                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3716                               pstats->mac_stx[1].s##_lo, \
3717                               new->s); \
3718         } while (0)
3719
3720 #define UPDATE_EXTEND_TSTAT(s, t) \
3721         do { \
3722                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3723                 old_tclient->s = tclient->s; \
3724                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3725         } while (0)
3726
3727 #define UPDATE_EXTEND_USTAT(s, t) \
3728         do { \
3729                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3730                 old_uclient->s = uclient->s; \
3731                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_XSTAT(s, t) \
3735         do { \
3736                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3737                 old_xclient->s = xclient->s; \
3738                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739         } while (0)
3740
3741 /* minuend -= subtrahend */
3742 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3743         do { \
3744                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3745         } while (0)
3746
3747 /* minuend[hi:lo] -= subtrahend */
3748 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3749         do { \
3750                 SUB_64(m_hi, 0, m_lo, s); \
3751         } while (0)
3752
3753 #define SUB_EXTEND_USTAT(s, t) \
3754         do { \
3755                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3756                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3757         } while (0)
3758
3759 /*
3760  * General service functions
3761  */
3762
3763 static inline long bnx2x_hilo(u32 *hiref)
3764 {
3765         u32 lo = *(hiref + 1);
3766 #if (BITS_PER_LONG == 64)
3767         u32 hi = *hiref;
3768
3769         return HILO_U64(hi, lo);
3770 #else
3771         return lo;
3772 #endif
3773 }
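
/*
 * Editor's note: hiref points at a {hi, lo} pair laid out high word
 * first; on 64-bit kernels the full value is rebuilt via HILO_U64, while
 * on 32-bit kernels (where long is 32 bits) only the low word is
 * returned.
 */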
3774
3775 /*
3776  * Init service functions
3777  */
3778
3779 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3780 {
3781         if (!bp->stats_pending) {
3782                 struct eth_query_ramrod_data ramrod_data = {0};
3783                 int i, rc;
3784
3785                 ramrod_data.drv_counter = bp->stats_counter++;
3786                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3787                 for_each_queue(bp, i)
3788                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3789
3790                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3791                                    ((u32 *)&ramrod_data)[1],
3792                                    ((u32 *)&ramrod_data)[0], 0);
3793                 if (rc == 0) {
3794                         /* stats ramrod has its own slot on the spq */
3795                         bp->spq_left++;
3796                         bp->stats_pending = 1;
3797                 }
3798         }
3799 }
3800
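/*
 * Editor's note: each dmae[] slot queued by the stats-init routines
 * completes to a GRC "go" register except the final one, which writes
 * DMAE_COMP_VAL to stats_comp.  bnx2x_hw_stats_post() then either kicks
 * the loader chain (bp->executer_idx != 0) or posts the single
 * function-stats command directly.
 */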
3801 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3802 {
3803         struct dmae_command *dmae = &bp->stats_dmae;
3804         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3805
3806         *stats_comp = DMAE_COMP_VAL;
3807         if (CHIP_REV_IS_SLOW(bp))
3808                 return;
3809
3810         /* loader */
3811         if (bp->executer_idx) {
3812                 int loader_idx = PMF_DMAE_C(bp);
3813
3814                 memset(dmae, 0, sizeof(struct dmae_command));
3815
3816                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3817                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3818                                 DMAE_CMD_DST_RESET |
3819 #ifdef __BIG_ENDIAN
3820                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3821 #else
3822                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3823 #endif
3824                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3825                                                DMAE_CMD_PORT_0) |
3826                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3827                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3828                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3829                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3830                                      sizeof(struct dmae_command) *
3831                                      (loader_idx + 1)) >> 2;
3832                 dmae->dst_addr_hi = 0;
3833                 dmae->len = sizeof(struct dmae_command) >> 2;
3834                 if (CHIP_IS_E1(bp))
3835                         dmae->len--;
3836                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3837                 dmae->comp_addr_hi = 0;
3838                 dmae->comp_val = 1;
3839
3840                 *stats_comp = 0;
3841                 bnx2x_post_dmae(bp, dmae, loader_idx);
3842
3843         } else if (bp->func_stx) {
3844                 *stats_comp = 0;
3845                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3846         }
3847 }
3848
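/*
 * Editor's note: bnx2x_stats_comp() polls for DMAE completion for at most
 * roughly 10 ms and returns 1 unconditionally, even after logging a
 * timeout, so callers treat statistics completion as best effort.
 */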
3849 static int bnx2x_stats_comp(struct bnx2x *bp)
3850 {
3851         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3852         int cnt = 10;
3853
3854         might_sleep();
3855         while (*stats_comp != DMAE_COMP_VAL) {
3856                 if (!cnt) {
3857                         BNX2X_ERR("timeout waiting for stats finished\n");
3858                         break;
3859                 }
3860                 cnt--;
3861                 msleep(1);
3862         }
3863         return 1;
3864 }
3865
3866 /*
3867  * Statistics service functions
3868  */
3869
3870 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3871 {
3872         struct dmae_command *dmae;
3873         u32 opcode;
3874         int loader_idx = PMF_DMAE_C(bp);
3875         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3876
3877         /* sanity */
3878         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3879                 BNX2X_ERR("BUG!\n");
3880                 return;
3881         }
3882
3883         bp->executer_idx = 0;
3884
3885         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3886                   DMAE_CMD_C_ENABLE |
3887                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3888 #ifdef __BIG_ENDIAN
3889                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3890 #else
3891                   DMAE_CMD_ENDIANITY_DW_SWAP |
3892 #endif
3893                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3894                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3895
3896         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3897         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3898         dmae->src_addr_lo = bp->port.port_stx >> 2;
3899         dmae->src_addr_hi = 0;
3900         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3901         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3902         dmae->len = DMAE_LEN32_RD_MAX;
3903         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3904         dmae->comp_addr_hi = 0;
3905         dmae->comp_val = 1;
3906
3907         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3908         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3909         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3910         dmae->src_addr_hi = 0;
3911         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3912                                    DMAE_LEN32_RD_MAX * 4);
3913         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3914                                    DMAE_LEN32_RD_MAX * 4);
3915         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3916         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3917         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3918         dmae->comp_val = DMAE_COMP_VAL;
3919
3920         *stats_comp = 0;
3921         bnx2x_hw_stats_post(bp);
3922         bnx2x_stats_comp(bp);
3923 }
3924
3925 static void bnx2x_port_stats_init(struct bnx2x *bp)
3926 {
3927         struct dmae_command *dmae;
3928         int port = BP_PORT(bp);
3929         int vn = BP_E1HVN(bp);
3930         u32 opcode;
3931         int loader_idx = PMF_DMAE_C(bp);
3932         u32 mac_addr;
3933         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3934
3935         /* sanity */
3936         if (!bp->link_vars.link_up || !bp->port.pmf) {
3937                 BNX2X_ERR("BUG!\n");
3938                 return;
3939         }
3940
3941         bp->executer_idx = 0;
3942
3943         /* MCP */
3944         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3945                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3946                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3947 #ifdef __BIG_ENDIAN
3948                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3949 #else
3950                   DMAE_CMD_ENDIANITY_DW_SWAP |
3951 #endif
3952                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3953                   (vn << DMAE_CMD_E1HVN_SHIFT));
3954
3955         if (bp->port.port_stx) {
3956
3957                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3958                 dmae->opcode = opcode;
3959                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3960                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3961                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3962                 dmae->dst_addr_hi = 0;
3963                 dmae->len = sizeof(struct host_port_stats) >> 2;
3964                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3965                 dmae->comp_addr_hi = 0;
3966                 dmae->comp_val = 1;
3967         }
3968
3969         if (bp->func_stx) {
3970
3971                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3972                 dmae->opcode = opcode;
3973                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3974                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3975                 dmae->dst_addr_lo = bp->func_stx >> 2;
3976                 dmae->dst_addr_hi = 0;
3977                 dmae->len = sizeof(struct host_func_stats) >> 2;
3978                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3979                 dmae->comp_addr_hi = 0;
3980                 dmae->comp_val = 1;
3981         }
3982
3983         /* MAC */
3984         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3985                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3986                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3987 #ifdef __BIG_ENDIAN
3988                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3989 #else
3990                   DMAE_CMD_ENDIANITY_DW_SWAP |
3991 #endif
3992                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3993                   (vn << DMAE_CMD_E1HVN_SHIFT));
3994
3995         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3996
3997                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3998                                    NIG_REG_INGRESS_BMAC0_MEM);
3999
4000                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4001                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4002                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4003                 dmae->opcode = opcode;
4004                 dmae->src_addr_lo = (mac_addr +
4005                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4006                 dmae->src_addr_hi = 0;
4007                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4008                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4009                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4010                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4011                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4012                 dmae->comp_addr_hi = 0;
4013                 dmae->comp_val = 1;
4014
4015                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4016                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4017                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4018                 dmae->opcode = opcode;
4019                 dmae->src_addr_lo = (mac_addr +
4020                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4021                 dmae->src_addr_hi = 0;
4022                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4023                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4024                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4025                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4026                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4027                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4029                 dmae->comp_addr_hi = 0;
4030                 dmae->comp_val = 1;
4031
4032         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4033
4034                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4035
4036                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4037                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4038                 dmae->opcode = opcode;
4039                 dmae->src_addr_lo = (mac_addr +
4040                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4041                 dmae->src_addr_hi = 0;
4042                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4043                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4044                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4045                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4046                 dmae->comp_addr_hi = 0;
4047                 dmae->comp_val = 1;
4048
4049                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4050                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4051                 dmae->opcode = opcode;
4052                 dmae->src_addr_lo = (mac_addr +
4053                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4054                 dmae->src_addr_hi = 0;
4055                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4056                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4057                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4058                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4059                 dmae->len = 1;
4060                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4061                 dmae->comp_addr_hi = 0;
4062                 dmae->comp_val = 1;
4063
4064                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4065                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066                 dmae->opcode = opcode;
4067                 dmae->src_addr_lo = (mac_addr +
4068                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4069                 dmae->src_addr_hi = 0;
4070                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4071                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4072                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4073                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4074                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4075                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4076                 dmae->comp_addr_hi = 0;
4077                 dmae->comp_val = 1;
4078         }
4079
4080         /* NIG */
4081         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4082         dmae->opcode = opcode;
4083         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4084                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4085         dmae->src_addr_hi = 0;
4086         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4087         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4088         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4089         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4090         dmae->comp_addr_hi = 0;
4091         dmae->comp_val = 1;
4092
4093         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094         dmae->opcode = opcode;
4095         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4096                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4097         dmae->src_addr_hi = 0;
4098         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4099                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4100         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4101                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4102         dmae->len = (2*sizeof(u32)) >> 2;
4103         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4104         dmae->comp_addr_hi = 0;
4105         dmae->comp_val = 1;
4106
4107         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4108         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4109                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4110                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4111 #ifdef __BIG_ENDIAN
4112                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4113 #else
4114                         DMAE_CMD_ENDIANITY_DW_SWAP |
4115 #endif
4116                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4117                         (vn << DMAE_CMD_E1HVN_SHIFT));
4118         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4119                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4120         dmae->src_addr_hi = 0;
4121         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4122                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4123         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4124                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4125         dmae->len = (2*sizeof(u32)) >> 2;
4126         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4127         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4128         dmae->comp_val = DMAE_COMP_VAL;
4129
4130         *stats_comp = 0;
4131 }
4132
4133 static void bnx2x_func_stats_init(struct bnx2x *bp)
4134 {
4135         struct dmae_command *dmae = &bp->stats_dmae;
4136         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4137
4138         /* sanity */
4139         if (!bp->func_stx) {
4140                 BNX2X_ERR("BUG!\n");
4141                 return;
4142         }
4143
4144         bp->executer_idx = 0;
4145         memset(dmae, 0, sizeof(struct dmae_command));
4146
4147         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4148                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4149                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4150 #ifdef __BIG_ENDIAN
4151                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4152 #else
4153                         DMAE_CMD_ENDIANITY_DW_SWAP |
4154 #endif
4155                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4156                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4157         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4158         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4159         dmae->dst_addr_lo = bp->func_stx >> 2;
4160         dmae->dst_addr_hi = 0;
4161         dmae->len = sizeof(struct host_func_stats) >> 2;
4162         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4163         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4164         dmae->comp_val = DMAE_COMP_VAL;
4165
4166         *stats_comp = 0;
4167 }
4168
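/*
 * (Re)arm statistics collection: the PMF rebuilds the whole port DMAE
 * chain, a non-PMF function only programs its per-function copy, and
 * both then post the first hardware and storm queries.
 */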
4169 static void bnx2x_stats_start(struct bnx2x *bp)
4170 {
4171         if (bp->port.pmf)
4172                 bnx2x_port_stats_init(bp);
4173
4174         else if (bp->func_stx)
4175                 bnx2x_func_stats_init(bp);
4176
4177         bnx2x_hw_stats_post(bp);
4178         bnx2x_storm_stats_post(bp);
4179 }
4180
4181 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4182 {
4183         bnx2x_stats_comp(bp);
4184         bnx2x_stats_pmf_update(bp);
4185         bnx2x_stats_start(bp);
4186 }
4187
4188 static void bnx2x_stats_restart(struct bnx2x *bp)
4189 {
4190         bnx2x_stats_comp(bp);
4191         bnx2x_stats_start(bp);
4192 }
4193
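/*
 * Fold the freshly DMAE'd BMAC counters into the host port stats.
 * Counters are kept as {hi, lo} u32 pairs; UPDATE_STAT64() diffs each
 * new value against the last snapshot (mac_stx[0]) and accumulates the
 * delta into the running totals (mac_stx[1]), using "diff" as scratch.
 */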
4194 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4195 {
4196         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4197         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4198         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4199         struct {
4200                 u32 lo;
4201                 u32 hi;
4202         } diff;
4203
4204         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4205         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4206         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4207         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4208         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4209         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4210         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4211         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4212         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4213         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4214         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4215         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4216         UPDATE_STAT64(tx_stat_gt127,
4217                                 tx_stat_etherstatspkts65octetsto127octets);
4218         UPDATE_STAT64(tx_stat_gt255,
4219                                 tx_stat_etherstatspkts128octetsto255octets);
4220         UPDATE_STAT64(tx_stat_gt511,
4221                                 tx_stat_etherstatspkts256octetsto511octets);
4222         UPDATE_STAT64(tx_stat_gt1023,
4223                                 tx_stat_etherstatspkts512octetsto1023octets);
4224         UPDATE_STAT64(tx_stat_gt1518,
4225                                 tx_stat_etherstatspkts1024octetsto1522octets);
4226         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4227         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4228         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4229         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4230         UPDATE_STAT64(tx_stat_gterr,
4231                                 tx_stat_dot3statsinternalmactransmiterrors);
4232         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4233
4234         estats->pause_frames_received_hi =
4235                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4236         estats->pause_frames_received_lo =
4237                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4238
4239         estats->pause_frames_sent_hi =
4240                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4241         estats->pause_frames_sent_lo =
4242                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4243 }
4244
4245 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4246 {
4247         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4248         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4249         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4250
4251         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4252         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4253         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4254         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4255         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4256         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4257         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4258         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4259         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4260         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4261         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4262         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4263         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4264         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4265         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4266         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4267         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4268         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4269         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4270         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4271         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4272         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4273         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4274         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4275         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4276         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4277         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4278         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4279         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4280         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4281         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4282
4283         estats->pause_frames_received_hi =
4284                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4285         estats->pause_frames_received_lo =
4286                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4287         ADD_64(estats->pause_frames_received_hi,
4288                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4289                estats->pause_frames_received_lo,
4290                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4291
4292         estats->pause_frames_sent_hi =
4293                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4294         estats->pause_frames_sent_lo =
4295                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4296         ADD_64(estats->pause_frames_sent_hi,
4297                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4298                estats->pause_frames_sent_lo,
4299                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4300 }
4301
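/*
 * Consume the results of the last hardware statistics pass: dispatch
 * to the MAC-specific update, accumulate the NIG discard/egress
 * counters against the old_nig_stats snapshot, and mirror the
 * mac_stx[1] totals into eth_stats for ethtool/netdev reporting.
 */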
4302 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4303 {
4304         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4305         struct nig_stats *old = &(bp->port.old_nig_stats);
4306         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4307         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4308         struct {
4309                 u32 lo;
4310                 u32 hi;
4311         } diff;
4312
4313         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4314                 bnx2x_bmac_stats_update(bp);
4315
4316         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4317                 bnx2x_emac_stats_update(bp);
4318
4319         else { /* unreached */
4320                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4321                 return -1;
4322         }
4323
4324         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4325                       new->brb_discard - old->brb_discard);
4326         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4327                       new->brb_truncate - old->brb_truncate);
4328
4329         UPDATE_STAT64_NIG(egress_mac_pkt0,
4330                                         etherstatspkts1024octetsto1522octets);
4331         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4332
4333         memcpy(old, new, sizeof(struct nig_stats));
4334
4335         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4336                sizeof(struct mac_stx));
4337         estats->brb_drop_hi = pstats->brb_drop_hi;
4338         estats->brb_drop_lo = pstats->brb_drop_lo;
4339
4340         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4341
4342         if (!BP_NOMCP(bp)) {
4343                 u32 nig_timer_max =
4344                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4345                 if (nig_timer_max != estats->nig_timer_max) {
4346                         estats->nig_timer_max = nig_timer_max;
4347                         BNX2X_ERR("NIG timer max (%u)\n",
4348                                   estats->nig_timer_max);
4349                 }
4350         }
4351
4352         return 0;
4353 }
4354
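/*
 * Merge the per-client statistics reported by the T/U/X storm
 * processors.  Each storm stamps its block with a stats_counter; the
 * block is valid only once that stamp has caught up with the query we
 * posted (stamp + 1 == bp->stats_counter), otherwise the whole update
 * is retried on the next tick.  RX byte counts arrive per cast type
 * and are corrected for no-buffer drops before being summed into the
 * per-function totals.
 */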
4355 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4356 {
4357         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4358         struct tstorm_per_port_stats *tport =
4359                                         &stats->tstorm_common.port_statistics;
4360         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4361         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4362         int i;
4363
4364         memcpy(&(fstats->total_bytes_received_hi),
4365                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4366                sizeof(struct host_func_stats) - 2*sizeof(u32));
4367         estats->error_bytes_received_hi = 0;
4368         estats->error_bytes_received_lo = 0;
4369         estats->etherstatsoverrsizepkts_hi = 0;
4370         estats->etherstatsoverrsizepkts_lo = 0;
4371         estats->no_buff_discard_hi = 0;
4372         estats->no_buff_discard_lo = 0;
4373
4374         for_each_queue(bp, i) {
4375                 struct bnx2x_fastpath *fp = &bp->fp[i];
4376                 int cl_id = fp->cl_id;
4377                 struct tstorm_per_client_stats *tclient =
4378                                 &stats->tstorm_common.client_statistics[cl_id];
4379                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4380                 struct ustorm_per_client_stats *uclient =
4381                                 &stats->ustorm_common.client_statistics[cl_id];
4382                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4383                 struct xstorm_per_client_stats *xclient =
4384                                 &stats->xstorm_common.client_statistics[cl_id];
4385                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4386                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4387                 u32 diff;
4388
4389                 /* are storm stats valid? */
4390                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4391                                                         bp->stats_counter) {
4392                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4393                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4394                            i, xclient->stats_counter, bp->stats_counter);
4395                         return -1;
4396                 }
4397                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4398                                                         bp->stats_counter) {
4399                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4400                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4401                            i, tclient->stats_counter, bp->stats_counter);
4402                         return -2;
4403                 }
4404                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4405                                                         bp->stats_counter) {
4406                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4407                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4408                            i, uclient->stats_counter, bp->stats_counter);
4409                         return -4;
4410                 }
4411
4412                 qstats->total_bytes_received_hi =
4413                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4414                 qstats->total_bytes_received_lo =
4415                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4416
4417                 ADD_64(qstats->total_bytes_received_hi,
4418                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4419                        qstats->total_bytes_received_lo,
4420                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4421
4422                 ADD_64(qstats->total_bytes_received_hi,
4423                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4424                        qstats->total_bytes_received_lo,
4425                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4426
4427                 SUB_64(qstats->total_bytes_received_hi,
4428                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4429                        qstats->total_bytes_received_lo,
4430                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4431
4432                 SUB_64(qstats->total_bytes_received_hi,
4433                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4434                        qstats->total_bytes_received_lo,
4435                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4436
4437                 SUB_64(qstats->total_bytes_received_hi,
4438                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4439                        qstats->total_bytes_received_lo,
4440                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4441
4442                 qstats->valid_bytes_received_hi =
4443                                         qstats->total_bytes_received_hi;
4444                 qstats->valid_bytes_received_lo =
4445                                         qstats->total_bytes_received_lo;
4446
4447                 qstats->error_bytes_received_hi =
4448                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4449                 qstats->error_bytes_received_lo =
4450                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4451
4452                 ADD_64(qstats->total_bytes_received_hi,
4453                        qstats->error_bytes_received_hi,
4454                        qstats->total_bytes_received_lo,
4455                        qstats->error_bytes_received_lo);
4456
4457                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4458                                         total_unicast_packets_received);
4459                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4460                                         total_multicast_packets_received);
4461                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4462                                         total_broadcast_packets_received);
4463                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4464                                         etherstatsoverrsizepkts);
4465                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4466
4467                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4468                                         total_unicast_packets_received);
4469                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4470                                         total_multicast_packets_received);
4471                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4472                                         total_broadcast_packets_received);
4473                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4474                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4475                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4476
4477                 qstats->total_bytes_transmitted_hi =
4478                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4479                 qstats->total_bytes_transmitted_lo =
4480                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4481
4482                 ADD_64(qstats->total_bytes_transmitted_hi,
4483                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4484                        qstats->total_bytes_transmitted_lo,
4485                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4486
4487                 ADD_64(qstats->total_bytes_transmitted_hi,
4488                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4489                        qstats->total_bytes_transmitted_lo,
4490                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4491
4492                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4493                                         total_unicast_packets_transmitted);
4494                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4495                                         total_multicast_packets_transmitted);
4496                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4497                                         total_broadcast_packets_transmitted);
4498
4499                 old_tclient->checksum_discard = tclient->checksum_discard;
4500                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4501
4502                 ADD_64(fstats->total_bytes_received_hi,
4503                        qstats->total_bytes_received_hi,
4504                        fstats->total_bytes_received_lo,
4505                        qstats->total_bytes_received_lo);
4506                 ADD_64(fstats->total_bytes_transmitted_hi,
4507                        qstats->total_bytes_transmitted_hi,
4508                        fstats->total_bytes_transmitted_lo,
4509                        qstats->total_bytes_transmitted_lo);
4510                 ADD_64(fstats->total_unicast_packets_received_hi,
4511                        qstats->total_unicast_packets_received_hi,
4512                        fstats->total_unicast_packets_received_lo,
4513                        qstats->total_unicast_packets_received_lo);
4514                 ADD_64(fstats->total_multicast_packets_received_hi,
4515                        qstats->total_multicast_packets_received_hi,
4516                        fstats->total_multicast_packets_received_lo,
4517                        qstats->total_multicast_packets_received_lo);
4518                 ADD_64(fstats->total_broadcast_packets_received_hi,
4519                        qstats->total_broadcast_packets_received_hi,
4520                        fstats->total_broadcast_packets_received_lo,
4521                        qstats->total_broadcast_packets_received_lo);
4522                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4523                        qstats->total_unicast_packets_transmitted_hi,
4524                        fstats->total_unicast_packets_transmitted_lo,
4525                        qstats->total_unicast_packets_transmitted_lo);
4526                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4527                        qstats->total_multicast_packets_transmitted_hi,
4528                        fstats->total_multicast_packets_transmitted_lo,
4529                        qstats->total_multicast_packets_transmitted_lo);
4530                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4531                        qstats->total_broadcast_packets_transmitted_hi,
4532                        fstats->total_broadcast_packets_transmitted_lo,
4533                        qstats->total_broadcast_packets_transmitted_lo);
4534                 ADD_64(fstats->valid_bytes_received_hi,
4535                        qstats->valid_bytes_received_hi,
4536                        fstats->valid_bytes_received_lo,
4537                        qstats->valid_bytes_received_lo);
4538
4539                 ADD_64(estats->error_bytes_received_hi,
4540                        qstats->error_bytes_received_hi,
4541                        estats->error_bytes_received_lo,
4542                        qstats->error_bytes_received_lo);
4543                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4544                        qstats->etherstatsoverrsizepkts_hi,
4545                        estats->etherstatsoverrsizepkts_lo,
4546                        qstats->etherstatsoverrsizepkts_lo);
4547                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4548                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4549         }
4550
4551         ADD_64(fstats->total_bytes_received_hi,
4552                estats->rx_stat_ifhcinbadoctets_hi,
4553                fstats->total_bytes_received_lo,
4554                estats->rx_stat_ifhcinbadoctets_lo);
4555
4556         memcpy(estats, &(fstats->total_bytes_received_hi),
4557                sizeof(struct host_func_stats) - 2*sizeof(u32));
4558
4559         ADD_64(estats->etherstatsoverrsizepkts_hi,
4560                estats->rx_stat_dot3statsframestoolong_hi,
4561                estats->etherstatsoverrsizepkts_lo,
4562                estats->rx_stat_dot3statsframestoolong_lo);
4563         ADD_64(estats->error_bytes_received_hi,
4564                estats->rx_stat_ifhcinbadoctets_hi,
4565                estats->error_bytes_received_lo,
4566                estats->rx_stat_ifhcinbadoctets_lo);
4567
4568         if (bp->port.pmf) {
4569                 estats->mac_filter_discard =
4570                                 le32_to_cpu(tport->mac_filter_discard);
4571                 estats->xxoverflow_discard =
4572                                 le32_to_cpu(tport->xxoverflow_discard);
4573                 estats->brb_truncate_discard =
4574                                 le32_to_cpu(tport->brb_truncate_discard);
4575                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4576         }
4577
4578         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4579
4580         bp->stats_pending = 0;
4581
4582         return 0;
4583 }
4584
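/*
 * Translate the accumulated driver statistics into the generic
 * net_device_stats counters; bnx2x_hilo() folds a {hi, lo} u32 pair
 * into a single value.
 */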
4585 static void bnx2x_net_stats_update(struct bnx2x *bp)
4586 {
4587         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4588         struct net_device_stats *nstats = &bp->dev->stats;
4589         int i;
4590
4591         nstats->rx_packets =
4592                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4593                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4594                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4595
4596         nstats->tx_packets =
4597                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4598                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4599                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4600
4601         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4602
4603         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4604
4605         nstats->rx_dropped = estats->mac_discard;
4606         for_each_queue(bp, i)
4607                 nstats->rx_dropped +=
4608                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4609
4610         nstats->tx_dropped = 0;
4611
4612         nstats->multicast =
4613                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4614
4615         nstats->collisions =
4616                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4617
4618         nstats->rx_length_errors =
4619                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4620                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4621         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4622                                  bnx2x_hilo(&estats->brb_truncate_hi);
4623         nstats->rx_crc_errors =
4624                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4625         nstats->rx_frame_errors =
4626                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4627         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4628         nstats->rx_missed_errors = estats->xxoverflow_discard;
4629
4630         nstats->rx_errors = nstats->rx_length_errors +
4631                             nstats->rx_over_errors +
4632                             nstats->rx_crc_errors +
4633                             nstats->rx_frame_errors +
4634                             nstats->rx_fifo_errors +
4635                             nstats->rx_missed_errors;
4636
4637         nstats->tx_aborted_errors =
4638                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4639                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4640         nstats->tx_carrier_errors =
4641                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4642         nstats->tx_fifo_errors = 0;
4643         nstats->tx_heartbeat_errors = 0;
4644         nstats->tx_window_errors = 0;
4645
4646         nstats->tx_errors = nstats->tx_aborted_errors +
4647                             nstats->tx_carrier_errors +
4648             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4649 }
4650
4651 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4652 {
4653         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4654         int i;
4655
4656         estats->driver_xoff = 0;
4657         estats->rx_err_discard_pkt = 0;
4658         estats->rx_skb_alloc_failed = 0;
4659         estats->hw_csum_err = 0;
4660         for_each_queue(bp, i) {
4661                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4662
4663                 estats->driver_xoff += qstats->driver_xoff;
4664                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4665                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4666                 estats->hw_csum_err += qstats->hw_csum_err;
4667         }
4668 }
4669
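/*
 * Periodic STATS_EVENT_UPDATE handler: do nothing if the previous DMAE
 * pass has not completed, fold in the hardware (PMF only) and storm
 * results, refresh the netdev and driver counters, then post the next
 * round of queries.  Repeated storm misses end in bnx2x_panic().
 */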
4670 static void bnx2x_stats_update(struct bnx2x *bp)
4671 {
4672         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4673
4674         if (*stats_comp != DMAE_COMP_VAL)
4675                 return;
4676
4677         if (bp->port.pmf)
4678                 bnx2x_hw_stats_update(bp);
4679
4680         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4681                 BNX2X_ERR("storm stats not updated for 3 consecutive ticks\n");
4682                 bnx2x_panic();
4683                 return;
4684         }
4685
4686         bnx2x_net_stats_update(bp);
4687         bnx2x_drv_stats_update(bp);
4688
4689         if (netif_msg_timer(bp)) {
4690                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4691                 int i;
4692
4693                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4694                        bp->dev->name,
4695                        estats->brb_drop_lo, estats->brb_truncate_lo);
4696
4697                 for_each_queue(bp, i) {
4698                         struct bnx2x_fastpath *fp = &bp->fp[i];
4699                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4700
4701                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4702                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4703                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4704                                fp->rx_comp_cons),
4705                                le16_to_cpu(*fp->rx_cons_sb),
4706                                bnx2x_hilo(&qstats->
4707                                           total_unicast_packets_received_hi),
4708                                fp->rx_calls, fp->rx_pkt);
4709                 }
4710
4711                 for_each_queue(bp, i) {
4712                         struct bnx2x_fastpath *fp = &bp->fp[i];
4713                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4714                         struct netdev_queue *txq =
4715                                 netdev_get_tx_queue(bp->dev, i);
4716
4717                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4718                                           "  tx pkt(%lu) tx calls (%lu)"
4719                                           "  %s (Xoff events %u)\n",
4720                                fp->name, bnx2x_tx_avail(fp),
4721                                le16_to_cpu(*fp->tx_cons_sb),
4722                                bnx2x_hilo(&qstats->
4723                                           total_unicast_packets_transmitted_hi),
4724                                fp->tx_pkt,
4725                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4726                                qstats->driver_xoff);
4727                 }
4728         }
4729
4730         bnx2x_hw_stats_post(bp);
4731         bnx2x_storm_stats_post(bp);
4732 }
4733
4734 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4735 {
4736         struct dmae_command *dmae;
4737         u32 opcode;
4738         int loader_idx = PMF_DMAE_C(bp);
4739         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4740
4741         bp->executer_idx = 0;
4742
4743         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4744                   DMAE_CMD_C_ENABLE |
4745                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4746 #ifdef __BIG_ENDIAN
4747                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4748 #else
4749                   DMAE_CMD_ENDIANITY_DW_SWAP |
4750 #endif
4751                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4752                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4753
4754         if (bp->port.port_stx) {
4755
4756                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4757                 if (bp->func_stx)
4758                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4759                 else
4760                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4761                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4762                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4763                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4764                 dmae->dst_addr_hi = 0;
4765                 dmae->len = sizeof(struct host_port_stats) >> 2;
4766                 if (bp->func_stx) {
4767                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4768                         dmae->comp_addr_hi = 0;
4769                         dmae->comp_val = 1;
4770                 } else {
4771                         dmae->comp_addr_lo =
4772                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4773                         dmae->comp_addr_hi =
4774                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4775                         dmae->comp_val = DMAE_COMP_VAL;
4776
4777                         *stats_comp = 0;
4778                 }
4779         }
4780
4781         if (bp->func_stx) {
4782
4783                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4784                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4785                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4786                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4787                 dmae->dst_addr_lo = bp->func_stx >> 2;
4788                 dmae->dst_addr_hi = 0;
4789                 dmae->len = sizeof(struct host_func_stats) >> 2;
4790                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4791                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4792                 dmae->comp_val = DMAE_COMP_VAL;
4793
4794                 *stats_comp = 0;
4795         }
4796 }
4797
4798 static void bnx2x_stats_stop(struct bnx2x *bp)
4799 {
4800         int update = 0;
4801
4802         bnx2x_stats_comp(bp);
4803
4804         if (bp->port.pmf)
4805                 update = (bnx2x_hw_stats_update(bp) == 0);
4806
4807         update |= (bnx2x_storm_stats_update(bp) == 0);
4808
4809         if (update) {
4810                 bnx2x_net_stats_update(bp);
4811
4812                 if (bp->port.pmf)
4813                         bnx2x_port_stats_stop(bp);
4814
4815                 bnx2x_hw_stats_post(bp);
4816                 bnx2x_stats_comp(bp);
4817         }
4818 }
4819
4820 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4821 {
4822 }
4823
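/*
 * Statistics state machine: indexed by the current state and the
 * incoming event, each entry names the action to run and the next
 * state to enter.
 */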
4824 static const struct {
4825         void (*action)(struct bnx2x *bp);
4826         enum bnx2x_stats_state next_state;
4827 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4828 /* state        event   */
4829 {
4830 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4831 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4832 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4833 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4834 },
4835 {
4836 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4837 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4838 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4839 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4840 }
4841 };
4842
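/*
 * Single entry point for statistics events.  For example (a sketch;
 * the real call sites are elsewhere in this file), a link-up
 * notification while collection is disabled starts it and moves the
 * machine to STATS_STATE_ENABLED:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 */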
4843 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4844 {
4845         enum bnx2x_stats_state state = bp->stats_state;
4846
4847         if (unlikely(bp->panic))
4848                 return;
4849
4850         bnx2x_stats_stm[state][event].action(bp);
4851         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4852
4853         /* Make sure the state change is visible before the next event */
4854         smp_wmb();
4855
4856         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4857                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4858                    state, event, bp->stats_state);
4859 }
4860
4861 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4862 {
4863         struct dmae_command *dmae;
4864         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4865
4866         /* sanity */
4867         if (!bp->port.pmf || !bp->port.port_stx) {
4868                 BNX2X_ERR("BUG! not a PMF or port_stx not set\n");
4869                 return;
4870         }
4871
4872         bp->executer_idx = 0;
4873
4874         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4875         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4876                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4877                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4878 #ifdef __BIG_ENDIAN
4879                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4880 #else
4881                         DMAE_CMD_ENDIANITY_DW_SWAP |
4882 #endif
4883                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4884                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4885         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4886         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4887         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4888         dmae->dst_addr_hi = 0;
4889         dmae->len = sizeof(struct host_port_stats) >> 2;
4890         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4891         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4892         dmae->comp_val = DMAE_COMP_VAL;
4893
4894         *stats_comp = 0;
4895         bnx2x_hw_stats_post(bp);
4896         bnx2x_stats_comp(bp);
4897 }
4898
4899 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4900 {
4901         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4902         int port = BP_PORT(bp);
4903         int func;
4904         u32 func_stx;
4905
4906         /* sanity */
4907         if (!bp->port.pmf || !bp->func_stx) {
4908                 BNX2X_ERR("BUG! not a PMF or func_stx not set\n");
4909                 return;
4910         }
4911
4912         /* save our func_stx */
4913         func_stx = bp->func_stx;
4914
4915         for (vn = VN_0; vn < vn_max; vn++) {
4916                 func = 2*vn + port;
4917
4918                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4919                 bnx2x_func_stats_init(bp);
4920                 bnx2x_hw_stats_post(bp);
4921                 bnx2x_stats_comp(bp);
4922         }
4923
4924         /* restore our func_stx */
4925         bp->func_stx = func_stx;
4926 }
4927
4928 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4929 {
4930         struct dmae_command *dmae = &bp->stats_dmae;
4931         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4932
4933         /* sanity */
4934         if (!bp->func_stx) {
4935                 BNX2X_ERR("BUG! func_stx not set\n");
4936                 return;
4937         }
4938
4939         bp->executer_idx = 0;
4940         memset(dmae, 0, sizeof(struct dmae_command));
4941
4942         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4943                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4944                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4945 #ifdef __BIG_ENDIAN
4946                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4947 #else
4948                         DMAE_CMD_ENDIANITY_DW_SWAP |
4949 #endif
4950                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4951                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4952         dmae->src_addr_lo = bp->func_stx >> 2;
4953         dmae->src_addr_hi = 0;
4954         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4955         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4956         dmae->len = sizeof(struct host_func_stats) >> 2;
4957         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4958         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4959         dmae->comp_val = DMAE_COMP_VAL;
4960
4961         *stats_comp = 0;
4962         bnx2x_hw_stats_post(bp);
4963         bnx2x_stats_comp(bp);
4964 }
4965
4966 static void bnx2x_stats_init(struct bnx2x *bp)
4967 {
4968         int port = BP_PORT(bp);
4969         int func = BP_FUNC(bp);
4970         int i;
4971
4972         bp->stats_pending = 0;
4973         bp->executer_idx = 0;
4974         bp->stats_counter = 0;
4975
4976         /* port and func stats for management */
4977         if (!BP_NOMCP(bp)) {
4978                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4979                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4980
4981         } else {
4982                 bp->port.port_stx = 0;
4983                 bp->func_stx = 0;
4984         }
4985         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4986            bp->port.port_stx, bp->func_stx);
4987
4988         /* port stats */
4989         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4990         bp->port.old_nig_stats.brb_discard =
4991                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4992         bp->port.old_nig_stats.brb_truncate =
4993                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4994         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4995                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4996         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4997                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4998
4999         /* function stats */
5000         for_each_queue(bp, i) {
5001                 struct bnx2x_fastpath *fp = &bp->fp[i];
5002
5003                 memset(&fp->old_tclient, 0,
5004                        sizeof(struct tstorm_per_client_stats));
5005                 memset(&fp->old_uclient, 0,
5006                        sizeof(struct ustorm_per_client_stats));
5007                 memset(&fp->old_xclient, 0,
5008                        sizeof(struct xstorm_per_client_stats));
5009                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5010         }
5011
5012         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5013         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5014
5015         bp->stats_state = STATS_STATE_DISABLED;
5016
5017         if (bp->port.pmf) {
5018                 if (bp->port.port_stx)
5019                         bnx2x_port_stats_base_init(bp);
5020
5021                 if (bp->func_stx)
5022                         bnx2x_func_stats_base_init(bp);
5023
5024         } else if (bp->func_stx)
5025                 bnx2x_func_stats_base_update(bp);
5026 }
5027
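/*
 * Periodic driver timer (rearmed every bp->current_interval jiffies):
 * services the optional "poll" module-parameter mode, exchanges the
 * driver/MCP heartbeat pulse through shared memory, and drives the
 * statistics state machine while the device is open.
 */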
5028 static void bnx2x_timer(unsigned long data)
5029 {
5030         struct bnx2x *bp = (struct bnx2x *) data;
5031
5032         if (!netif_running(bp->dev))
5033                 return;
5034
5035         if (atomic_read(&bp->intr_sem) != 0)
5036                 goto timer_restart;
5037
5038         if (poll) {
5039                 struct bnx2x_fastpath *fp = &bp->fp[0];
5040                 int rc;
5041
5042                 bnx2x_tx_int(fp);
5043                 rc = bnx2x_rx_int(fp, 1000);
5044         }
5045
5046         if (!BP_NOMCP(bp)) {
5047                 int func = BP_FUNC(bp);
5048                 u32 drv_pulse;
5049                 u32 mcp_pulse;
5050
5051                 ++bp->fw_drv_pulse_wr_seq;
5052                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5053                 /* TBD - add SYSTEM_TIME */
5054                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5055                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5056
5057                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5058                              MCP_PULSE_SEQ_MASK);
5059                 /* The delta between driver pulse and mcp response
5060                  * should be 1 (before mcp response) or 0 (after mcp response)
5061                  */
5062                 if ((drv_pulse != mcp_pulse) &&
5063                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5064                         /* someone lost a heartbeat... */
5065                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5066                                   drv_pulse, mcp_pulse);
5067                 }
5068         }
5069
5070         if (bp->state == BNX2X_STATE_OPEN)
5071                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5072
5073 timer_restart:
5074         mod_timer(&bp->timer, jiffies + bp->current_interval);
5075 }
5076
5077 /* end of Statistics */
5078
5079 /* nic init */
5080
5081 /*
5082  * nic init service functions
5083  */
5084
5085 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5086 {
5087         int port = BP_PORT(bp);
5088
5089         /* "CSTORM" */
5090         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5091                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5092                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5093         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5094                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5095                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5096 }
5097
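/*
 * Point a fastpath status block at its DMA mapping: publish the USTORM
 * and CSTORM section addresses and the owning function to internal
 * memory, mark every index disabled until bnx2x_update_coalesce()
 * programs it, then ack to enable the IGU interrupt for this sb_id.
 */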
5098 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5099                           dma_addr_t mapping, int sb_id)
5100 {
5101         int port = BP_PORT(bp);
5102         int func = BP_FUNC(bp);
5103         int index;
5104         u64 section;
5105
5106         /* USTORM */
5107         section = ((u64)mapping) + offsetof(struct host_status_block,
5108                                             u_status_block);
5109         sb->u_status_block.status_block_id = sb_id;
5110
5111         REG_WR(bp, BAR_CSTRORM_INTMEM +
5112                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5113         REG_WR(bp, BAR_CSTRORM_INTMEM +
5114                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5115                U64_HI(section));
5116         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5117                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5118
5119         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5120                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5121                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5122
5123         /* CSTORM */
5124         section = ((u64)mapping) + offsetof(struct host_status_block,
5125                                             c_status_block);
5126         sb->c_status_block.status_block_id = sb_id;
5127
5128         REG_WR(bp, BAR_CSTRORM_INTMEM +
5129                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5130         REG_WR(bp, BAR_CSTRORM_INTMEM +
5131                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5132                U64_HI(section));
5133         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5134                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5135
5136         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5137                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5138                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5139
5140         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5141 }
5142
5143 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5144 {
5145         int func = BP_FUNC(bp);
5146
5147         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5148                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5149                         sizeof(struct tstorm_def_status_block)/4);
5150         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5151                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5152                         sizeof(struct cstorm_def_status_block_u)/4);
5153         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5154                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5155                         sizeof(struct cstorm_def_status_block_c)/4);
5156         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5157                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5158                         sizeof(struct xstorm_def_status_block)/4);
5159 }
5160
5161 static void bnx2x_init_def_sb(struct bnx2x *bp,
5162                               struct host_def_status_block *def_sb,
5163                               dma_addr_t mapping, int sb_id)
5164 {
5165         int port = BP_PORT(bp);
5166         int func = BP_FUNC(bp);
5167         int index, val, reg_offset;
5168         u64 section;
5169
5170         /* ATTN */
5171         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5172                                             atten_status_block);
5173         def_sb->atten_status_block.status_block_id = sb_id;
5174
5175         bp->attn_state = 0;
5176
5177         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5178                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5179
5180         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5181                 bp->attn_group[index].sig[0] = REG_RD(bp,
5182                                                      reg_offset + 0x10*index);
5183                 bp->attn_group[index].sig[1] = REG_RD(bp,
5184                                                reg_offset + 0x4 + 0x10*index);
5185                 bp->attn_group[index].sig[2] = REG_RD(bp,
5186                                                reg_offset + 0x8 + 0x10*index);
5187                 bp->attn_group[index].sig[3] = REG_RD(bp,
5188                                                reg_offset + 0xc + 0x10*index);
5189         }
5190
5191         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5192                              HC_REG_ATTN_MSG0_ADDR_L);
5193
5194         REG_WR(bp, reg_offset, U64_LO(section));
5195         REG_WR(bp, reg_offset + 4, U64_HI(section));
5196
5197         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5198
5199         val = REG_RD(bp, reg_offset);
5200         val |= sb_id;
5201         REG_WR(bp, reg_offset, val);
5202
5203         /* USTORM */
5204         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5205                                             u_def_status_block);
5206         def_sb->u_def_status_block.status_block_id = sb_id;
5207
5208         REG_WR(bp, BAR_CSTRORM_INTMEM +
5209                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5210         REG_WR(bp, BAR_CSTRORM_INTMEM +
5211                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5212                U64_HI(section));
5213         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5214                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5215
5216         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5217                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5218                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5219
5220         /* CSTORM */
5221         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5222                                             c_def_status_block);
5223         def_sb->c_def_status_block.status_block_id = sb_id;
5224
5225         REG_WR(bp, BAR_CSTRORM_INTMEM +
5226                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5227         REG_WR(bp, BAR_CSTRORM_INTMEM +
5228                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5229                U64_HI(section));
5230         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5231                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5232
5233         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5234                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5235                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5236
5237         /* TSTORM */
5238         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5239                                             t_def_status_block);
5240         def_sb->t_def_status_block.status_block_id = sb_id;
5241
5242         REG_WR(bp, BAR_TSTRORM_INTMEM +
5243                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5244         REG_WR(bp, BAR_TSTRORM_INTMEM +
5245                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5246                U64_HI(section));
5247         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5248                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5249
5250         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5251                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5252                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5253
5254         /* XSTORM */
5255         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5256                                             x_def_status_block);
5257         def_sb->x_def_status_block.status_block_id = sb_id;
5258
5259         REG_WR(bp, BAR_XSTRORM_INTMEM +
5260                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5261         REG_WR(bp, BAR_XSTRORM_INTMEM +
5262                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5263                U64_HI(section));
5264         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5265                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5266
5267         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5268                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5269                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5270
5271         bp->stats_pending = 0;
5272         bp->set_mac_pending = 0;
5273
5274         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5275 }
5276
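/*
 * Apply the current interrupt coalescing settings to every queue:
 * rx_ticks/tx_ticks are programmed into the RX/TX CQ index timeouts in
 * units of 4 * BNX2X_BTR, and a resulting value of 0 disables the
 * index instead.
 */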
5277 static void bnx2x_update_coalesce(struct bnx2x *bp)
5278 {
5279         int port = BP_PORT(bp);
5280         int i;
5281
5282         for_each_queue(bp, i) {
5283                 int sb_id = bp->fp[i].sb_id;
5284
5285                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5286                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5287                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5288                                                       U_SB_ETH_RX_CQ_INDEX),
5289                         bp->rx_ticks/(4 * BNX2X_BTR));
5290                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5291                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5292                                                        U_SB_ETH_RX_CQ_INDEX),
5293                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5294
5295                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5296                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5297                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5298                                                       C_SB_ETH_TX_CQ_INDEX),
5299                         bp->tx_ticks/(4 * BNX2X_BTR));
5300                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5301                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5302                                                        C_SB_ETH_TX_CQ_INDEX),
5303                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5304         }
5305 }
5306
5307 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5308                                        struct bnx2x_fastpath *fp, int last)
5309 {
5310         int i;
5311
5312         for (i = 0; i < last; i++) {
5313                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5314                 struct sk_buff *skb = rx_buf->skb;
5315
5316                 if (skb == NULL) {
5317                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5318                         continue;
5319                 }
5320
5321                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5322                         dma_unmap_single(&bp->pdev->dev,
5323                                          dma_unmap_addr(rx_buf, mapping),
5324                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5325
5326                 dev_kfree_skb(skb);
5327                 rx_buf->skb = NULL;
5328         }
5329 }
5330
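/*
 * Build the RX side of every queue: derive rx_buf_size from the MTU,
 * pre-allocate the TPA aggregation skb pool when TPA is enabled
 * (falling back to TPA-off on that queue if allocation fails), chain
 * the SGE/BD/CQ ring pages through their "next page" elements, and
 * finally populate the rings themselves.
 */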
5331 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5332 {
5333         int func = BP_FUNC(bp);
5334         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5335                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5336         u16 ring_prod, cqe_ring_prod;
5337         int i, j;
5338
5339         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5340         DP(NETIF_MSG_IFUP,
5341            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5342
5343         if (bp->flags & TPA_ENABLE_FLAG) {
5344
5345                 for_each_queue(bp, j) {
5346                         struct bnx2x_fastpath *fp = &bp->fp[j];
5347
5348                         for (i = 0; i < max_agg_queues; i++) {
5349                                 fp->tpa_pool[i].skb =
5350                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5351                                 if (!fp->tpa_pool[i].skb) {
5352                                         BNX2X_ERR("Failed to allocate TPA "
5353                                                   "skb pool for queue[%d] - "
5354                                                   "disabling TPA on this "
5355                                                   "queue!\n", j);
5356                                         bnx2x_free_tpa_pool(bp, fp, i);
5357                                         fp->disable_tpa = 1;
5358                                         break;
5359                                 }
5360                                 dma_unmap_addr_set(
5361                                                 &fp->tpa_pool[i],
5362                                                 mapping, 0);
5363                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5364                         }
5365                 }
5366         }
5367
5368         for_each_queue(bp, j) {
5369                 struct bnx2x_fastpath *fp = &bp->fp[j];
5370
5371                 fp->rx_bd_cons = 0;
5372                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5373                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5374
5375                 /* "next page" elements initialization */
5376                 /* SGE ring */
5377                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5378                         struct eth_rx_sge *sge;
5379
5380                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5381                         sge->addr_hi =
5382                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5383                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5384                         sge->addr_lo =
5385                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5386                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5387                 }
5388
5389                 bnx2x_init_sge_ring_bit_mask(fp);
5390
5391                 /* RX BD ring */
5392                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5393                         struct eth_rx_bd *rx_bd;
5394
5395                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5396                         rx_bd->addr_hi =
5397                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5398                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5399                         rx_bd->addr_lo =
5400                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5401                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5402                 }
5403
5404                 /* CQ ring */
5405                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5406                         struct eth_rx_cqe_next_page *nextpg;
5407
5408                         nextpg = (struct eth_rx_cqe_next_page *)
5409                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5410                         nextpg->addr_hi =
5411                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5412                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5413                         nextpg->addr_lo =
5414                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5415                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5416                 }
5417
5418                 /* Allocate SGEs and initialize the ring elements */
5419                 for (i = 0, ring_prod = 0;
5420                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5421
5422                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5423                                 BNX2X_ERR("was only able to allocate "
5424                                           "%d rx sges\n", i);
5425                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5426                                 /* Cleanup already allocated elements */
5427                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5428                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5429                                 fp->disable_tpa = 1;
5430                                 ring_prod = 0;
5431                                 break;
5432                         }
5433                         ring_prod = NEXT_SGE_IDX(ring_prod);
5434                 }
5435                 fp->rx_sge_prod = ring_prod;
5436
5437                 /* Allocate BDs and initialize BD ring */
5438                 fp->rx_comp_cons = 0;
5439                 cqe_ring_prod = ring_prod = 0;
5440                 for (i = 0; i < bp->rx_ring_size; i++) {
5441                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5442                                 BNX2X_ERR("was only able to allocate "
5443                                           "%d rx skbs on queue[%d]\n", i, j);
5444                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5445                                 break;
5446                         }
5447                         ring_prod = NEXT_RX_IDX(ring_prod);
5448                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5449                         WARN_ON(ring_prod <= i);
5450                 }
5451
5452                 fp->rx_bd_prod = ring_prod;
5453                 /* must not have more available CQEs than BDs */
5454                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5455                                          cqe_ring_prod);
5456                 fp->rx_pkt = fp->rx_calls = 0;
5457
5458                 /* Warning!
5459                  * This will generate an interrupt (to the TSTORM);
5460                  * it must only be done after the chip is initialized.
5461                  */
5462                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5463                                      fp->rx_sge_prod);
5464                 if (j != 0)
5465                         continue;
5466
5467                 REG_WR(bp, BAR_USTRORM_INTMEM +
5468                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5469                        U64_LO(fp->rx_comp_mapping));
5470                 REG_WR(bp, BAR_USTRORM_INTMEM +
5471                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5472                        U64_HI(fp->rx_comp_mapping));
5473         }
5474 }
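
/*
 * Note on the ring layout set up above: each BD/SGE/CQE ring is a
 * chain of pages, and the final slots of every page are initialized
 * as "next page" pointers to the following page (wrapping through the
 * i % NUM_*_RINGS arithmetic), so the chip can follow the rings
 * without driver involvement.
 */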
5475
5476 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5477 {
5478         int i, j;
5479
5480         for_each_queue(bp, j) {
5481                 struct bnx2x_fastpath *fp = &bp->fp[j];
5482
5483                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5484                         struct eth_tx_next_bd *tx_next_bd =
5485                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5486
5487                         tx_next_bd->addr_hi =
5488                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5489                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5490                         tx_next_bd->addr_lo =
5491                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5492                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5493                 }
5494
5495                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5496                 fp->tx_db.data.zero_fill1 = 0;
5497                 fp->tx_db.data.prod = 0;
5498
5499                 fp->tx_pkt_prod = 0;
5500                 fp->tx_pkt_cons = 0;
5501                 fp->tx_bd_prod = 0;
5502                 fp->tx_bd_cons = 0;
5503                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5504                 fp->tx_pkt = 0;
5505         }
5506 }
5507
5508 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5509 {
5510         int func = BP_FUNC(bp);
5511
5512         spin_lock_init(&bp->spq_lock);
5513
5514         bp->spq_left = MAX_SPQ_PENDING;
5515         bp->spq_prod_idx = 0;
5516         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5517         bp->spq_prod_bd = bp->spq;
5518         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5519
5520         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5521                U64_LO(bp->spq_mapping));
5522         REG_WR(bp,
5523                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5524                U64_HI(bp->spq_mapping));
5525
5526         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5527                bp->spq_prod_idx);
5528 }
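
/*
 * The slow-path queue set up above is a single ring of MAX_SP_DESC_CNT
 * BDs: the producer starts at 0 and the page base is published to the
 * XSTORM; spq_last_bd presumably lets the posting code detect when the
 * producer must wrap back to bp->spq.
 */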
5529
5530 static void bnx2x_init_context(struct bnx2x *bp)
5531 {
5532         int i;
5533
5534         /* Rx */
5535         for_each_queue(bp, i) {
5536                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5537                 struct bnx2x_fastpath *fp = &bp->fp[i];
5538                 u8 cl_id = fp->cl_id;
5539
5540                 context->ustorm_st_context.common.sb_index_numbers =
5541                                                 BNX2X_RX_SB_INDEX_NUM;
5542                 context->ustorm_st_context.common.clientId = cl_id;
5543                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5544                 context->ustorm_st_context.common.flags =
5545                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5546                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5547                 context->ustorm_st_context.common.statistics_counter_id =
5548                                                 cl_id;
5549                 context->ustorm_st_context.common.mc_alignment_log_size =
5550                                                 BNX2X_RX_ALIGN_SHIFT;
5551                 context->ustorm_st_context.common.bd_buff_size =
5552                                                 bp->rx_buf_size;
5553                 context->ustorm_st_context.common.bd_page_base_hi =
5554                                                 U64_HI(fp->rx_desc_mapping);
5555                 context->ustorm_st_context.common.bd_page_base_lo =
5556                                                 U64_LO(fp->rx_desc_mapping);
5557                 if (!fp->disable_tpa) {
5558                         context->ustorm_st_context.common.flags |=
5559                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5560                         context->ustorm_st_context.common.sge_buff_size =
5561                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5562                                            0xffff);
5563                         context->ustorm_st_context.common.sge_page_base_hi =
5564                                                 U64_HI(fp->rx_sge_mapping);
5565                         context->ustorm_st_context.common.sge_page_base_lo =
5566                                                 U64_LO(fp->rx_sge_mapping);
5567
5568                         context->ustorm_st_context.common.max_sges_for_packet =
5569                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5570                         context->ustorm_st_context.common.max_sges_for_packet =
5571                                 ((context->ustorm_st_context.common.
5572                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5573                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5574                 }
5575
5576                 context->ustorm_ag_context.cdu_usage =
5577                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5578                                                CDU_REGION_NUMBER_UCM_AG,
5579                                                ETH_CONNECTION_TYPE);
5580
5581                 context->xstorm_ag_context.cdu_reserved =
5582                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5583                                                CDU_REGION_NUMBER_XCM_AG,
5584                                                ETH_CONNECTION_TYPE);
5585         }
5586
5587         /* Tx */
5588         for_each_queue(bp, i) {
5589                 struct bnx2x_fastpath *fp = &bp->fp[i];
5590                 struct eth_context *context =
5591                         bnx2x_sp(bp, context[i].eth);
5592
5593                 context->cstorm_st_context.sb_index_number =
5594                                                 C_SB_ETH_TX_CQ_INDEX;
5595                 context->cstorm_st_context.status_block_id = fp->sb_id;
5596
5597                 context->xstorm_st_context.tx_bd_page_base_hi =
5598                                                 U64_HI(fp->tx_desc_mapping);
5599                 context->xstorm_st_context.tx_bd_page_base_lo =
5600                                                 U64_LO(fp->tx_desc_mapping);
5601                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5602                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5603         }
5604 }
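
/*
 * In the contexts filled above, the USTORM section describes the Rx
 * side of a client (BD/SGE page bases, buffer sizes, optional TPA)
 * while the CSTORM and XSTORM sections describe the Tx side (status
 * block index and Tx BD page base) of the same connection.
 */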
5605
5606 static void bnx2x_init_ind_table(struct bnx2x *bp)
5607 {
5608         int func = BP_FUNC(bp);
5609         int i;
5610
5611         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5612                 return;
5613
5614         DP(NETIF_MSG_IFUP,
5615            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5616         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5617                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5618                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5619                         bp->fp->cl_id + (i % bp->num_queues));
5620 }
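
/*
 * Worked example: with 4 queues and a leading cl_id of 0, the loop
 * above fills the indirection table with the repeating pattern
 * 0,1,2,3,0,1,2,3,... so RSS hash results are spread evenly across
 * the clients of all active queues.
 */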
5621
5622 static void bnx2x_set_client_config(struct bnx2x *bp)
5623 {
5624         struct tstorm_eth_client_config tstorm_client = {0};
5625         int port = BP_PORT(bp);
5626         int i;
5627
5628         tstorm_client.mtu = bp->dev->mtu;
5629         tstorm_client.config_flags =
5630                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5631                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5632 #ifdef BCM_VLAN
5633         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5634                 tstorm_client.config_flags |=
5635                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5636                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5637         }
5638 #endif
5639
5640         for_each_queue(bp, i) {
5641                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5642
5643                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5644                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5645                        ((u32 *)&tstorm_client)[0]);
5646                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5647                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5648                        ((u32 *)&tstorm_client)[1]);
5649         }
5650
5651         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5652            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5653 }
5654
5655 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5656 {
5657         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5658         int mode = bp->rx_mode;
5659         int mask = bp->rx_mode_cl_mask;
5660         int func = BP_FUNC(bp);
5661         int port = BP_PORT(bp);
5662         int i;
5663         /* All but management unicast packets should pass to the host as well */
5664         u32 llh_mask =
5665                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5666                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5667                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5668                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5669
5670         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5671
5672         switch (mode) {
5673         case BNX2X_RX_MODE_NONE: /* no Rx */
5674                 tstorm_mac_filter.ucast_drop_all = mask;
5675                 tstorm_mac_filter.mcast_drop_all = mask;
5676                 tstorm_mac_filter.bcast_drop_all = mask;
5677                 break;
5678
5679         case BNX2X_RX_MODE_NORMAL:
5680                 tstorm_mac_filter.bcast_accept_all = mask;
5681                 break;
5682
5683         case BNX2X_RX_MODE_ALLMULTI:
5684                 tstorm_mac_filter.mcast_accept_all = mask;
5685                 tstorm_mac_filter.bcast_accept_all = mask;
5686                 break;
5687
5688         case BNX2X_RX_MODE_PROMISC:
5689                 tstorm_mac_filter.ucast_accept_all = mask;
5690                 tstorm_mac_filter.mcast_accept_all = mask;
5691                 tstorm_mac_filter.bcast_accept_all = mask;
5692                 /* pass management unicast packets as well */
5693                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5694                 break;
5695
5696         default:
5697                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5698                 break;
5699         }
5700
5701         REG_WR(bp,
5702                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5703                llh_mask);
5704
5705         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5706                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5707                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5708                        ((u32 *)&tstorm_mac_filter)[i]);
5709
5710 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5711                    ((u32 *)&tstorm_mac_filter)[i]); */
5712         }
5713
5714         if (mode != BNX2X_RX_MODE_NONE)
5715                 bnx2x_set_client_config(bp);
5716 }
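
/*
 * Two filters are programmed above: llh_mask tells the NIG which
 * packet classes to pass towards the BRB (management unicast is only
 * added in promiscuous mode), and the tstorm MAC filter then applies
 * the per-client accept/drop policy selected by the rx_mode switch.
 */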
5717
5718 static void bnx2x_init_internal_common(struct bnx2x *bp)
5719 {
5720         int i;
5721
5722         /* Zero this manually as its initialization is
5723            currently missing in the initTool */
5724         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5725                 REG_WR(bp, BAR_USTRORM_INTMEM +
5726                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5727 }
5728
5729 static void bnx2x_init_internal_port(struct bnx2x *bp)
5730 {
5731         int port = BP_PORT(bp);
5732
5733         REG_WR(bp,
5734                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5735         REG_WR(bp,
5736                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5737         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5738         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5739 }
5740
5741 static void bnx2x_init_internal_func(struct bnx2x *bp)
5742 {
5743         struct tstorm_eth_function_common_config tstorm_config = {0};
5744         struct stats_indication_flags stats_flags = {0};
5745         int port = BP_PORT(bp);
5746         int func = BP_FUNC(bp);
5747         int i, j;
5748         u32 offset;
5749         u16 max_agg_size;
5750
5751         if (is_multi(bp)) {
5752                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5753                 tstorm_config.rss_result_mask = MULTI_MASK;
5754         }
5755
5756         /* Enable TPA if needed */
5757         if (bp->flags & TPA_ENABLE_FLAG)
5758                 tstorm_config.config_flags |=
5759                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5760
5761         if (IS_E1HMF(bp))
5762                 tstorm_config.config_flags |=
5763                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5764
5765         tstorm_config.leading_client_id = BP_L_ID(bp);
5766
5767         REG_WR(bp, BAR_TSTRORM_INTMEM +
5768                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5769                (*(u32 *)&tstorm_config));
5770
5771         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5772         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5773         bnx2x_set_storm_rx_mode(bp);
5774
5775         for_each_queue(bp, i) {
5776                 u8 cl_id = bp->fp[i].cl_id;
5777
5778                 /* reset xstorm per client statistics */
5779                 offset = BAR_XSTRORM_INTMEM +
5780                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5781                 for (j = 0;
5782                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5783                         REG_WR(bp, offset + j*4, 0);
5784
5785                 /* reset tstorm per client statistics */
5786                 offset = BAR_TSTRORM_INTMEM +
5787                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788                 for (j = 0;
5789                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5790                         REG_WR(bp, offset + j*4, 0);
5791
5792                 /* reset ustorm per client statistics */
5793                 offset = BAR_USTRORM_INTMEM +
5794                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795                 for (j = 0;
5796                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5797                         REG_WR(bp, offset + j*4, 0);
5798         }
5799
5800         /* Init statistics related context */
5801         stats_flags.collect_eth = 1;
5802
5803         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5804                ((u32 *)&stats_flags)[0]);
5805         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5806                ((u32 *)&stats_flags)[1]);
5807
5808         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5809                ((u32 *)&stats_flags)[0]);
5810         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5811                ((u32 *)&stats_flags)[1]);
5812
5813         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5814                ((u32 *)&stats_flags)[0]);
5815         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5816                ((u32 *)&stats_flags)[1]);
5817
5818         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5819                ((u32 *)&stats_flags)[0]);
5820         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5821                ((u32 *)&stats_flags)[1]);
5822
5823         REG_WR(bp, BAR_XSTRORM_INTMEM +
5824                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5825                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5826         REG_WR(bp, BAR_XSTRORM_INTMEM +
5827                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5828                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5829
5830         REG_WR(bp, BAR_TSTRORM_INTMEM +
5831                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833         REG_WR(bp, BAR_TSTRORM_INTMEM +
5834                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5836
5837         REG_WR(bp, BAR_USTRORM_INTMEM +
5838                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840         REG_WR(bp, BAR_USTRORM_INTMEM +
5841                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5843
5844         if (CHIP_IS_E1H(bp)) {
5845                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5846                         IS_E1HMF(bp));
5847                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5848                         IS_E1HMF(bp));
5849                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5850                         IS_E1HMF(bp));
5851                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5852                         IS_E1HMF(bp));
5853
5854                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5855                          bp->e1hov);
5856         }
5857
5858         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5859         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5860                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5861         for_each_queue(bp, i) {
5862                 struct bnx2x_fastpath *fp = &bp->fp[i];
5863
5864                 REG_WR(bp, BAR_USTRORM_INTMEM +
5865                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5866                        U64_LO(fp->rx_comp_mapping));
5867                 REG_WR(bp, BAR_USTRORM_INTMEM +
5868                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5869                        U64_HI(fp->rx_comp_mapping));
5870
5871                 /* Next page */
5872                 REG_WR(bp, BAR_USTRORM_INTMEM +
5873                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5874                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5875                 REG_WR(bp, BAR_USTRORM_INTMEM +
5876                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5877                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5878
5879                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5880                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5881                          max_agg_size);
5882         }
5883
5884         /* dropless flow control */
5885         if (CHIP_IS_E1H(bp)) {
5886                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5887
5888                 rx_pause.bd_thr_low = 250;
5889                 rx_pause.cqe_thr_low = 250;
5890                 rx_pause.cos = 1;
5891                 rx_pause.sge_thr_low = 0;
5892                 rx_pause.bd_thr_high = 350;
5893                 rx_pause.cqe_thr_high = 350;
5894                 rx_pause.sge_thr_high = 0;
5895
5896                 for_each_queue(bp, i) {
5897                         struct bnx2x_fastpath *fp = &bp->fp[i];
5898
5899                         if (!fp->disable_tpa) {
5900                                 rx_pause.sge_thr_low = 150;
5901                                 rx_pause.sge_thr_high = 250;
5902                         }
5903
5904
5905                         offset = BAR_USTRORM_INTMEM +
5906                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5907                                                                    fp->cl_id);
5908                         for (j = 0;
5909                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5910                              j++)
5911                                 REG_WR(bp, offset + j*4,
5912                                        ((u32 *)&rx_pause)[j]);
5913                 }
5914         }
5915
5916         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5917
5918         /* Init rate shaping and fairness contexts */
5919         if (IS_E1HMF(bp)) {
5920                 int vn;
5921
5922                 /* During init there is no active link.
5923                    Until link is up, set link rate to 10Gbps */
5924                 bp->link_vars.line_speed = SPEED_10000;
5925                 bnx2x_init_port_minmax(bp);
5926
5927                 if (!BP_NOMCP(bp))
5928                         bp->mf_config =
5929                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5930                 bnx2x_calc_vn_weight_sum(bp);
5931
5932                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5933                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5934
5935                 /* Enable rate shaping and fairness */
5936                 bp->cmng.flags.cmng_enables |=
5937                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5938
5939         } else {
5940                 /* rate shaping and fairness are disabled */
5941                 DP(NETIF_MSG_IFUP,
5942                    "single function mode  minmax will be disabled\n");
5943         }
5944
5945
5946         /* Store cmng structures to internal memory */
5947         if (bp->port.pmf)
5948                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5949                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5950                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5951                                ((u32 *)(&bp->cmng))[i]);
5952 }
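
/*
 * Summary of the per-function init above: RSS/TPA configuration is
 * written to the TSTORM, per-client statistics are zeroed, statistics
 * flags are set for all four storms and the fw_stats DMA address is
 * published to the X/T/U storms, dropless flow-control thresholds are
 * programmed on E1H, and (for the PMF only) the cmng structures for
 * rate shaping and fairness are stored to internal memory.
 */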
5953
5954 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5955 {
5956         switch (load_code) {
5957         case FW_MSG_CODE_DRV_LOAD_COMMON:
5958                 bnx2x_init_internal_common(bp);
5959                 /* no break */
5960
5961         case FW_MSG_CODE_DRV_LOAD_PORT:
5962                 bnx2x_init_internal_port(bp);
5963                 /* no break */
5964
5965         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5966                 bnx2x_init_internal_func(bp);
5967                 break;
5968
5969         default:
5970                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5971                 break;
5972         }
5973 }
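
/*
 * The switch above falls through on purpose (see the "no break"
 * comments): a COMMON load also performs the PORT and FUNCTION
 * initializations, and a PORT load also performs the FUNCTION one.
 */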
5974
5975 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5976 {
5977         int i;
5978
5979         for_each_queue(bp, i) {
5980                 struct bnx2x_fastpath *fp = &bp->fp[i];
5981
5982                 fp->bp = bp;
5983                 fp->state = BNX2X_FP_STATE_CLOSED;
5984                 fp->index = i;
5985                 fp->cl_id = BP_L_ID(bp) + i;
5986 #ifdef BCM_CNIC
5987                 fp->sb_id = fp->cl_id + 1;
5988 #else
5989                 fp->sb_id = fp->cl_id;
5990 #endif
5991                 DP(NETIF_MSG_IFUP,
5992                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5993                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5994                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5995                               fp->sb_id);
5996                 bnx2x_update_fpsb_idx(fp);
5997         }
5998
5999         /* ensure status block indices were read */
6000         rmb();
6001
6002
6003         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6004                           DEF_SB_ID);
6005         bnx2x_update_dsb_idx(bp);
6006         bnx2x_update_coalesce(bp);
6007         bnx2x_init_rx_rings(bp);
6008         bnx2x_init_tx_ring(bp);
6009         bnx2x_init_sp_ring(bp);
6010         bnx2x_init_context(bp);
6011         bnx2x_init_internal(bp, load_code);
6012         bnx2x_init_ind_table(bp);
6013         bnx2x_stats_init(bp);
6014
6015         /* At this point, we are ready for interrupts */
6016         atomic_set(&bp->intr_sem, 0);
6017
6018         /* flush all before enabling interrupts */
6019         mb();
6020         mmiowb();
6021
6022         bnx2x_int_enable(bp);
6023
6024         /* Check for SPIO5 */
6025         bnx2x_attn_int_deasserted0(bp,
6026                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6027                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6028 }
6029
6030 /* end of nic init */
6031
6032 /*
6033  * gzip service functions
6034  */
6035
6036 static int bnx2x_gunzip_init(struct bnx2x *bp)
6037 {
6038         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6039                                             &bp->gunzip_mapping, GFP_KERNEL);
6040         if (bp->gunzip_buf == NULL)
6041                 goto gunzip_nomem1;
6042
6043         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6044         if (bp->strm == NULL)
6045                 goto gunzip_nomem2;
6046
6047         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6048                                       GFP_KERNEL);
6049         if (bp->strm->workspace == NULL)
6050                 goto gunzip_nomem3;
6051
6052         return 0;
6053
6054 gunzip_nomem3:
6055         kfree(bp->strm);
6056         bp->strm = NULL;
6057
6058 gunzip_nomem2:
6059         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6060                           bp->gunzip_mapping);
6061         bp->gunzip_buf = NULL;
6062
6063 gunzip_nomem1:
6064         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6065                " decompression\n");
6066         return -ENOMEM;
6067 }
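
/*
 * The gotos above follow the usual kernel unwind pattern: each
 * failure label releases only what was successfully allocated before
 * the jump, in reverse allocation order.
 */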
6068
6069 static void bnx2x_gunzip_end(struct bnx2x *bp)
6070 {
6071         kfree(bp->strm->workspace);
6072
6073         kfree(bp->strm);
6074         bp->strm = NULL;
6075
6076         if (bp->gunzip_buf) {
6077                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6078                                   bp->gunzip_mapping);
6079                 bp->gunzip_buf = NULL;
6080         }
6081 }
6082
6083 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6084 {
6085         int n, rc;
6086
6087         /* check gzip header */
6088         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6089                 BNX2X_ERR("Bad gzip header\n");
6090                 return -EINVAL;
6091         }
6092
6093         n = 10;
6094
6095 #define FNAME                           0x8
6096
6097         if (zbuf[3] & FNAME)
6098                 while ((zbuf[n++] != 0) && (n < len));
6099
6100         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6101         bp->strm->avail_in = len - n;
6102         bp->strm->next_out = bp->gunzip_buf;
6103         bp->strm->avail_out = FW_BUF_SIZE;
6104
6105         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6106         if (rc != Z_OK)
6107                 return rc;
6108
6109         rc = zlib_inflate(bp->strm, Z_FINISH);
6110         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6111                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6112                            bp->strm->msg);
6113
6114         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6115         if (bp->gunzip_outlen & 0x3)
6116                 netdev_err(bp->dev, "Firmware decompression error:"
6117                                     " gunzip_outlen (%d) not aligned\n",
6118                                 bp->gunzip_outlen);
6119         bp->gunzip_outlen >>= 2;
6120
6121         zlib_inflateEnd(bp->strm);
6122
6123         if (rc == Z_STREAM_END)
6124                 return 0;
6125
6126         return rc;
6127 }
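
/*
 * bnx2x_gunzip() relies on the gzip format: a fixed 10-byte header
 * (magic 0x1f 0x8b, compression method Z_DEFLATED) optionally
 * followed by a NUL-terminated file name when the FNAME flag is set.
 * Passing -MAX_WBITS to zlib_inflateInit2() requests a raw inflate,
 * since the header has already been skipped by hand.
 */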
6128
6129 /* nic load/unload */
6130
6131 /*
6132  * General service functions
6133  */
6134
6135 /* send a NIG loopback debug packet */
6136 static void bnx2x_lb_pckt(struct bnx2x *bp)
6137 {
6138         u32 wb_write[3];
6139
6140         /* Ethernet source and destination addresses */
6141         wb_write[0] = 0x55555555;
6142         wb_write[1] = 0x55555555;
6143         wb_write[2] = 0x20;             /* SOP */
6144         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6145
6146         /* NON-IP protocol */
6147         wb_write[0] = 0x09000000;
6148         wb_write[1] = 0x55555555;
6149         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6150         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6151 }
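
/*
 * Each DMAE write above pushes 8 bytes of packet data plus a control
 * word (0x20 marks SOP, 0x10 marks EOP) into the NIG debug-packet
 * register, so the two writes emit a single 16-byte (0x10) frame.
 */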
6152
6153 /* Some of the internal memories
6154  * are not directly readable from the driver;
6155  * to test them we send debug packets.
6156  */
6157 static int bnx2x_int_mem_test(struct bnx2x *bp)
6158 {
6159         int factor;
6160         int count, i;
6161         u32 val = 0;
6162
6163         if (CHIP_REV_IS_FPGA(bp))
6164                 factor = 120;
6165         else if (CHIP_REV_IS_EMUL(bp))
6166                 factor = 200;
6167         else
6168                 factor = 1;
6169
6170         DP(NETIF_MSG_HW, "start part1\n");
6171
6172         /* Disable inputs of parser neighbor blocks */
6173         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6174         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6175         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6176         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6177
6178         /*  Write 0 to parser credits for CFC search request */
6179         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6180
6181         /* send Ethernet packet */
6182         bnx2x_lb_pckt(bp);
6183
6184         /* TODO: do we need to reset the NIG statistic? */
6185         /* Wait until NIG register shows 1 packet of size 0x10 */
6186         count = 1000 * factor;
6187         while (count) {
6188
6189                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6190                 val = *bnx2x_sp(bp, wb_data[0]);
6191                 if (val == 0x10)
6192                         break;
6193
6194                 msleep(10);
6195                 count--;
6196         }
6197         if (val != 0x10) {
6198                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6199                 return -1;
6200         }
6201
6202         /* Wait until PRS register shows 1 packet */
6203         count = 1000 * factor;
6204         while (count) {
6205                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6206                 if (val == 1)
6207                         break;
6208
6209                 msleep(10);
6210                 count--;
6211         }
6212         if (val != 0x1) {
6213                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6214                 return -2;
6215         }
6216
6217         /* Reset and init BRB, PRS */
6218         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6219         msleep(50);
6220         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6221         msleep(50);
6222         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6223         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6224
6225         DP(NETIF_MSG_HW, "part2\n");
6226
6227         /* Disable inputs of parser neighbor blocks */
6228         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6229         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6230         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6231         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6232
6233         /* Write 0 to parser credits for CFC search request */
6234         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6235
6236         /* send 10 Ethernet packets */
6237         for (i = 0; i < 10; i++)
6238                 bnx2x_lb_pckt(bp);
6239
6240         /* Wait until NIG register shows 10 + 1
6241            packets, i.e. a total size of 11*0x10 = 0xb0 */
6242         count = 1000 * factor;
6243         while (count) {
6244
6245                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6246                 val = *bnx2x_sp(bp, wb_data[0]);
6247                 if (val == 0xb0)
6248                         break;
6249
6250                 msleep(10);
6251                 count--;
6252         }
6253         if (val != 0xb0) {
6254                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6255                 return -3;
6256         }
6257
6258         /* Wait until PRS register shows 2 packets */
6259         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6260         if (val != 2)
6261                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6262
6263         /* Write 1 to parser credits for CFC search request */
6264         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6265
6266         /* Wait until PRS register shows 3 packets */
6267         msleep(10 * factor);
6268         /* Check that the PRS register shows 3 packets */
6269         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6270         if (val != 3)
6271                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6272
6273         /* clear NIG EOP FIFO */
6274         for (i = 0; i < 11; i++)
6275                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6276         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6277         if (val != 1) {
6278                 BNX2X_ERR("clear of NIG failed\n");
6279                 return -4;
6280         }
6281
6282         /* Reset and init BRB, PRS, NIG */
6283         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6284         msleep(50);
6285         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6286         msleep(50);
6287         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6288         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6289 #ifndef BCM_CNIC
6290         /* set NIC mode */
6291         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6292 #endif
6293
6294         /* Enable inputs of parser neighbor blocks */
6295         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6296         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6297         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6298         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6299
6300         DP(NETIF_MSG_HW, "done\n");
6301
6302         return 0; /* OK */
6303 }
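
/*
 * The idea behind the self test above: with the parser's CFC search
 * credits held at zero, the looped-back packets pile up in the BRB,
 * which is observed through the NIG octet counter and the PRS packet
 * counter; granting a single credit then lets the parser drain them,
 * indirectly exercising internal memories the driver cannot read
 * directly.
 */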
6304
6305 static void enable_blocks_attention(struct bnx2x *bp)
6306 {
6307         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6308         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6309         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6310         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6311         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6312         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6313         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6314         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6315         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6316 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6317 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6318         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6319         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6320         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6321 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6322 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6323         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6324         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6325         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6326         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6327 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6328 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6329         if (CHIP_REV_IS_FPGA(bp))
6330                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6331         else
6332                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6333         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6334         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6335         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6336 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6337 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6338         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6339         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6340 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6341         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6342 }
6343
6344 static const struct {
6345         u32 addr;
6346         u32 mask;
6347 } bnx2x_parity_mask[] = {
6348         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6349         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6350         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6351         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6352         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6353         {QM_REG_QM_PRTY_MASK, 0x0},
6354         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6355         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6356         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6357         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6358         {CDU_REG_CDU_PRTY_MASK, 0x0},
6359         {CFC_REG_CFC_PRTY_MASK, 0x0},
6360         {DBG_REG_DBG_PRTY_MASK, 0x0},
6361         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6362         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6363         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6364         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6365         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6366         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6367         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6368         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6369         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6370         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6371         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6372         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6373         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6374         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6375         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6376 };
6377
6378 static void enable_blocks_parity(struct bnx2x *bp)
6379 {
6380         int i;
6381         int mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6382
6383         for (i = 0; i < mask_arr_len; i++)
6384                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6385                         bnx2x_parity_mask[i].mask);
6386 }
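
/*
 * For the PRTY_MASK registers written above, a 0 bit unmasks (enables)
 * the corresponding parity attention; the non-zero mask values keep
 * the annotated bits (e.g. bit 2 of SRC, bits 3,4 of TSDM) masked,
 * and blocks such as PXP and HC are left fully masked.
 */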
6387
6388
6389 static void bnx2x_reset_common(struct bnx2x *bp)
6390 {
6391         /* reset_common */
6392         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6393                0xd3ffff7f);
6394         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6395 }
6396
6397 static void bnx2x_init_pxp(struct bnx2x *bp)
6398 {
6399         u16 devctl;
6400         int r_order, w_order;
6401
6402         pci_read_config_word(bp->pdev,
6403                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6404         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6405         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6406         if (bp->mrrs == -1)
6407                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6408         else {
6409                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6410                 r_order = bp->mrrs;
6411         }
6412
6413         bnx2x_init_pxp_arb(bp, r_order, w_order);
6414 }
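
/*
 * The PCIe Device Control fields read above are log2-encoded sizes:
 * max payload in bits [7:5] and max read request in bits [14:12],
 * with size = 128 << value.  For example, a READRQ field of 2 means
 * a 512-byte maximum read request.
 */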
6415
6416 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6417 {
6418         int is_required;
6419         u32 val;
6420         int port;
6421
6422         if (BP_NOMCP(bp))
6423                 return;
6424
6425         is_required = 0;
6426         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6427               SHARED_HW_CFG_FAN_FAILURE_MASK;
6428
6429         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6430                 is_required = 1;
6431
6432         /*
6433          * The fan failure mechanism is usually related to the PHY type since
6434          * the power consumption of the board is affected by the PHY. Currently,
6435          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6436          */
6437         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6438                 for (port = PORT_0; port < PORT_MAX; port++) {
6439                         u32 phy_type =
6440                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6441                                          external_phy_config) &
6442                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6443                         is_required |=
6444                                 ((phy_type ==
6445                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6446                                  (phy_type ==
6447                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6448                                  (phy_type ==
6449                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6450                 }
6451
6452         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6453
6454         if (is_required == 0)
6455                 return;
6456
6457         /* Fan failure is indicated by SPIO 5 */
6458         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6459                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6460
6461         /* set to active low mode */
6462         val = REG_RD(bp, MISC_REG_SPIO_INT);
6463         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6464                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6465         REG_WR(bp, MISC_REG_SPIO_INT, val);
6466
6467         /* enable interrupt to signal the IGU */
6468         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6469         val |= (1 << MISC_REGISTERS_SPIO_5);
6470         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6471 }
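
/*
 * Fan failure is thus reported through SPIO 5: the pin is switched to
 * input (hi-Z), armed as an active-low interrupt, and routed to the
 * IGU via the SPIO event enable, so it surfaces as the
 * AEU_INPUTS_ATTN_BITS_SPIO5 attention checked in bnx2x_nic_init().
 */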
6472
6473 static int bnx2x_init_common(struct bnx2x *bp)
6474 {
6475         u32 val, i;
6476 #ifdef BCM_CNIC
6477         u32 wb_write[2];
6478 #endif
6479
6480         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6481
6482         bnx2x_reset_common(bp);
6483         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6484         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6485
6486         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6487         if (CHIP_IS_E1H(bp))
6488                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6489
6490         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6491         msleep(30);
6492         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6493
6494         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6495         if (CHIP_IS_E1(bp)) {
6496                 /* enable HW interrupt from PXP on USDM overflow
6497                    bit 16 on INT_MASK_0 */
6498                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6499         }
6500
6501         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6502         bnx2x_init_pxp(bp);
6503
6504 #ifdef __BIG_ENDIAN
6505         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6506         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6507         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6508         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6509         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6510         /* make sure this value is 0 */
6511         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6512
6513 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6514         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6515         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6516         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6517         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6518 #endif
6519
6520         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6521 #ifdef BCM_CNIC
6522         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6523         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6524         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6525 #endif
6526
6527         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6528                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6529
6530         /* let the HW do its magic ... */
6531         msleep(100);
6532         /* finish PXP init */
6533         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6534         if (val != 1) {
6535                 BNX2X_ERR("PXP2 CFG failed\n");
6536                 return -EBUSY;
6537         }
6538         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6539         if (val != 1) {
6540                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6541                 return -EBUSY;
6542         }
6543
6544         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6545         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6546
6547         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6548
6549         /* clean the DMAE memory */
6550         bp->dmae_ready = 1;
6551         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6552
6553         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6554         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6555         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6556         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6557
6558         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6559         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6560         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6561         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6562
6563         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6564
6565 #ifdef BCM_CNIC
6566         wb_write[0] = 0;
6567         wb_write[1] = 0;
6568         for (i = 0; i < 64; i++) {
6569                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6570                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6571
6572                 if (CHIP_IS_E1H(bp)) {
6573                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6574                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6575                                           wb_write, 2);
6576                 }
6577         }
6578 #endif
6579         /* soft reset pulse */
6580         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6581         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6582
6583 #ifdef BCM_CNIC
6584         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6585 #endif
6586
6587         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6588         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6589         if (!CHIP_REV_IS_SLOW(bp)) {
6590                 /* enable hw interrupt from doorbell Q */
6591                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6592         }
6593
6594         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6595         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6596         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6597 #ifndef BCM_CNIC
6598         /* set NIC mode */
6599         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6600 #endif
6601         if (CHIP_IS_E1H(bp))
6602                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6603
6604         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6605         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6606         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6607         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6608
6609         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6610         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6611         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6612         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6613
6614         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6615         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6616         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6617         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6618
6619         /* sync semi rtc */
6620         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6621                0x80000000);
6622         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6623                0x80000000);
6624
6625         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6626         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6627         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6628
6629         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6630         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6631                 REG_WR(bp, i, 0xc0cac01a);
6632                 /* TODO: replace with something meaningful */
6633         }
6634         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6635 #ifdef BCM_CNIC
6636         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6637         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6638         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6639         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6640         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6641         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6642         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6643         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6644         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6645         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6646 #endif
6647         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6648
6649         if (sizeof(union cdu_context) != 1024)
6650                 /* we currently assume that a context is 1024 bytes */
6651                 dev_alert(&bp->pdev->dev, "please adjust the size "
6652                                           "of cdu_context (%ld)\n",
6653                          (long)sizeof(union cdu_context));
6654
6655         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6656         val = (4 << 24) + (0 << 12) + 1024;
6657         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6658
6659         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6660         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6661         /* enable context validation interrupt from CFC */
6662         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6663
6664         /* set the thresholds to prevent CFC/CDU race */
6665         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6666
6667         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6668         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6669
6670         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6671         /* Reset PCIE errors for debug */
6672         REG_WR(bp, 0x2814, 0xffffffff);
6673         REG_WR(bp, 0x3820, 0xffffffff);
6674
6675         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6676         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6677         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6678         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6679
6680         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6681         if (CHIP_IS_E1H(bp)) {
6682                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6683                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6684         }
6685
6686         if (CHIP_REV_IS_SLOW(bp))
6687                 msleep(200);
6688
6689         /* finish CFC init */
6690         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6691         if (val != 1) {
6692                 BNX2X_ERR("CFC LL_INIT failed\n");
6693                 return -EBUSY;
6694         }
6695         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6696         if (val != 1) {
6697                 BNX2X_ERR("CFC AC_INIT failed\n");
6698                 return -EBUSY;
6699         }
6700         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6701         if (val != 1) {
6702                 BNX2X_ERR("CFC CAM_INIT failed\n");
6703                 return -EBUSY;
6704         }
6705         REG_WR(bp, CFC_REG_DEBUG0, 0);
6706
6707         /* read NIG statistic
6708            to see if this is our first up since powerup */
6709         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6710         val = *bnx2x_sp(bp, wb_data[0]);
6711
6712         /* do internal memory self test */
6713         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6714                 BNX2X_ERR("internal mem self test failed\n");
6715                 return -EBUSY;
6716         }
6717
6718         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6719         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6720         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6721         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6722         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6723                 bp->port.need_hw_lock = 1;
6724                 break;
6725
6726         default:
6727                 break;
6728         }
6729
6730         bnx2x_setup_fan_failure_detection(bp);
6731
6732         /* clear PXP2 attentions */
6733         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6734
6735         enable_blocks_attention(bp);
6736         if (CHIP_PARITY_SUPPORTED(bp))
6737                 enable_blocks_parity(bp);
6738
6739         if (!BP_NOMCP(bp)) {
6740                 bnx2x_acquire_phy_lock(bp);
6741                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6742                 bnx2x_release_phy_lock(bp);
6743         } else
6744                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6745
6746         return 0;
6747 }
6748
6749 static int bnx2x_init_port(struct bnx2x *bp)
6750 {
6751         int port = BP_PORT(bp);
6752         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6753         u32 low, high;
6754         u32 val;
6755
6756         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6757
6758         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6759
6760         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6761         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6762
6763         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6764         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6765         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6766         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6767
6768 #ifdef BCM_CNIC
6769         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6770
6771         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6772         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6773         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6774 #endif
6775
6776         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6777
6778         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6779         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6780                 /* no pause for emulation and FPGA */
6781                 low = 0;
6782                 high = 513;
6783         } else {
6784                 if (IS_E1HMF(bp))
6785                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6786                 else if (bp->dev->mtu > 4096) {
6787                         if (bp->flags & ONE_PORT_FLAG)
6788                                 low = 160;
6789                         else {
6790                                 val = bp->dev->mtu;
6791                                 /* (24*1024 + val*4)/256 */
6792                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6793                         }
6794                 } else
6795                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6796                 high = low + 56;        /* 14*1024/256 */
6797         }
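        /* The thresholds above are in 256-byte BRB blocks (note the
         * /256 in the formulas): e.g. for mtu 9000 on a two-port,
         * non-MF setup, low = 96 + DIV_ROUND_UP(9000, 64) = 237 blocks
         * and high = 237 + 56.
         */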
6798         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6799         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6800
6801
6802         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6803
6804         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6805         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6806         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6807         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6808
6809         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6810         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6811         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6812         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6813
6814         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6815         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6816
6817         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6818
6819         /* configure PBF to work without PAUSE, mtu 9000 */
6820         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6821
6822         /* update threshold */
6823         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6824         /* update init credit */
6825         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6826
6827         /* probe changes */
6828         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6829         msleep(5);
6830         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6831
6832 #ifdef BCM_CNIC
6833         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6834 #endif
6835         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6836         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6837
6838         if (CHIP_IS_E1(bp)) {
6839                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6840                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6841         }
6842         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6843
6844         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6845         /* init aeu_mask_attn_func_0/1:
6846          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6847          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6848          *             bits 4-7 are used for "per vn group attention" */
6849         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6850                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6851
6852         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6853         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6854         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6855         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6856         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6857
6858         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6859
6860         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6861
6862         if (CHIP_IS_E1H(bp)) {
6863                 /* 0x2 disable e1hov, 0x1 enable */
6864                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6865                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6866
6867                 {
6868                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6869                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6870                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6871                 }
6872         }
6873
6874         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6875         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6876
6877         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6878         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6879                 {
6880                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6881
6882                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6883                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6884
6885                 /* The GPIO should be swapped if the swap register is
6886                    set and active */
6887                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6888                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6889
6890                 /* Select function upon port-swap configuration */
6891                 if (port == 0) {
6892                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6893                         aeu_gpio_mask = (swap_val && swap_override) ?
6894                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6895                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6896                 } else {
6897                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6898                         aeu_gpio_mask = (swap_val && swap_override) ?
6899                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6900                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6901                 }
6902                 val = REG_RD(bp, offset);
6903                 /* add GPIO3 to group */
6904                 val |= aeu_gpio_mask;
6905                 REG_WR(bp, offset, val);
6906                 }
6907                 break;
6908
6909         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6910         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6911                 /* add SPIO 5 to group 0 */
6912                 {
6913                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6914                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6915                 val = REG_RD(bp, reg_addr);
6916                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6917                 REG_WR(bp, reg_addr, val);
6918                 }
6919                 break;
6920
6921         default:
6922                 break;
6923         }
6924
6925         bnx2x__link_reset(bp);
6926
6927         return 0;
6928 }
6929
6930 #define ILT_PER_FUNC            (768/2)
6931 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6932 /* the phys address is shifted right 12 bits and has a
6933    1=valid bit added to the 53rd bit;
6934    then, since this is a wide register(TM),
6935    we split it into two 32-bit writes
6936  */
6937 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6938 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
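/* Example: ONCHIP_ADDR1() carries bits 43..12 of the physical address,
 * ONCHIP_ADDR2() carries bits 63..44 plus the valid bit at bit 20 --
 * bit 52 of the combined value, i.e. the 53rd bit mentioned above.
 */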
6939 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6940 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
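/* PXP_ILT_RANGE() packs the last line in bits 10+ and the first line
 * in bits 9..0; PXP_ONE_ILT() is simply the degenerate range [x, x].
 */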
6941
6942 #ifdef BCM_CNIC
6943 #define CNIC_ILT_LINES          127
6944 #define CNIC_CTX_PER_ILT        16
6945 #else
6946 #define CNIC_ILT_LINES          0
6947 #endif
6948
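/* Write one ILT entry (8 bytes per line, hence index*8) as two 32-bit
 * halves; E1H uses the _B0 copy of the on-chip address table.
 */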
6949 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6950 {
6951         int reg;
6952
6953         if (CHIP_IS_E1H(bp))
6954                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6955         else /* E1 */
6956                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6957
6958         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6959 }
6960
6961 static int bnx2x_init_func(struct bnx2x *bp)
6962 {
6963         int port = BP_PORT(bp);
6964         int func = BP_FUNC(bp);
6965         u32 addr, val;
6966         int i;
6967
6968         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6969
6970         /* set MSI reconfigure capability */
6971         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6972         val = REG_RD(bp, addr);
6973         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6974         REG_WR(bp, addr, val);
6975
6976         i = FUNC_ILT_BASE(func);
6977
6978         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6979         if (CHIP_IS_E1H(bp)) {
6980                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6981                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6982         } else /* E1 */
6983                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6984                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6985
6986 #ifdef BCM_CNIC
6987         i += 1 + CNIC_ILT_LINES;
6988         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6989         if (CHIP_IS_E1(bp))
6990                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6991         else {
6992                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6993                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6994         }
6995
6996         i++;
6997         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6998         if (CHIP_IS_E1(bp))
6999                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7000         else {
7001                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7002                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7003         }
7004
7005         i++;
7006         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7007         if (CHIP_IS_E1(bp))
7008                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7009         else {
7010                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7011                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7012         }
7013
7014         /* tell the searcher where the T2 table is */
7015         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7016
7017         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7018                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7019
7020         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7021                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7022                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7023
7024         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7025 #endif
7026
7027         if (CHIP_IS_E1H(bp)) {
7028                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7029                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7030                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7031                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7032                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7033                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7034                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7035                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7036                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7037
7038                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7039                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7040         }
7041
7042         /* HC init per function */
7043         if (CHIP_IS_E1H(bp)) {
7044                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7045
7046                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7047                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7048         }
7049         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7050
7051         /* Reset PCIE errors for debug */
7052         REG_WR(bp, 0x2114, 0xffffffff);
7053         REG_WR(bp, 0x2120, 0xffffffff);
7054
7055         return 0;
7056 }
7057
7058 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7059 {
7060         int i, rc = 0;
7061
7062         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7063            BP_FUNC(bp), load_code);
7064
7065         bp->dmae_ready = 0;
7066         mutex_init(&bp->dmae_mutex);
7067         rc = bnx2x_gunzip_init(bp);
7068         if (rc)
7069                 return rc;
7070
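        /* The cases below fall through on purpose: a COMMON load also
         * performs PORT and FUNCTION init, and a PORT load also
         * performs FUNCTION init.
         */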
7071         switch (load_code) {
7072         case FW_MSG_CODE_DRV_LOAD_COMMON:
7073                 rc = bnx2x_init_common(bp);
7074                 if (rc)
7075                         goto init_hw_err;
7076                 /* no break */
7077
7078         case FW_MSG_CODE_DRV_LOAD_PORT:
7079                 bp->dmae_ready = 1;
7080                 rc = bnx2x_init_port(bp);
7081                 if (rc)
7082                         goto init_hw_err;
7083                 /* no break */
7084
7085         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7086                 bp->dmae_ready = 1;
7087                 rc = bnx2x_init_func(bp);
7088                 if (rc)
7089                         goto init_hw_err;
7090                 break;
7091
7092         default:
7093                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7094                 break;
7095         }
7096
7097         if (!BP_NOMCP(bp)) {
7098                 int func = BP_FUNC(bp);
7099
7100                 bp->fw_drv_pulse_wr_seq =
7101                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7102                                  DRV_PULSE_SEQ_MASK);
7103                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7104         }
7105
7106         /* this needs to be done before gunzip end */
7107         bnx2x_zero_def_sb(bp);
7108         for_each_queue(bp, i)
7109                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7110 #ifdef BCM_CNIC
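        /* after the loop above i == number of queues, so the CNIC
         * status block is zeroed right after the fastpath ones */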
7111         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7112 #endif
7113
7114 init_hw_err:
7115         bnx2x_gunzip_end(bp);
7116
7117         return rc;
7118 }
7119
7120 static void bnx2x_free_mem(struct bnx2x *bp)
7121 {
7122
7123 #define BNX2X_PCI_FREE(x, y, size) \
7124         do { \
7125                 if (x) { \
7126                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7127                         x = NULL; \
7128                         y = 0; \
7129                 } \
7130         } while (0)
7131
7132 #define BNX2X_FREE(x) \
7133         do { \
7134                 if (x) { \
7135                         vfree(x); \
7136                         x = NULL; \
7137                 } \
7138         } while (0)
7139
7140         int i;
7141
7142         /* fastpath */
7143         /* Common */
7144         for_each_queue(bp, i) {
7145
7146                 /* status blocks */
7147                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7148                                bnx2x_fp(bp, i, status_blk_mapping),
7149                                sizeof(struct host_status_block));
7150         }
7151         /* Rx */
7152         for_each_queue(bp, i) {
7153
7154                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7155                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7156                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7157                                bnx2x_fp(bp, i, rx_desc_mapping),
7158                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7159
7160                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7161                                bnx2x_fp(bp, i, rx_comp_mapping),
7162                                sizeof(struct eth_fast_path_rx_cqe) *
7163                                NUM_RCQ_BD);
7164
7165                 /* SGE ring */
7166                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7167                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7168                                bnx2x_fp(bp, i, rx_sge_mapping),
7169                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7170         }
7171         /* Tx */
7172         for_each_queue(bp, i) {
7173
7174                 /* fastpath tx rings: tx_buf tx_desc */
7175                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7176                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7177                                bnx2x_fp(bp, i, tx_desc_mapping),
7178                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7179         }
7180         /* end of fastpath */
7181
7182         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7183                        sizeof(struct host_def_status_block));
7184
7185         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7186                        sizeof(struct bnx2x_slowpath));
7187
7188 #ifdef BCM_CNIC
7189         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7190         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7191         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7192         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7193         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7194                        sizeof(struct host_status_block));
7195 #endif
7196         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7197
7198 #undef BNX2X_PCI_FREE
7199 #undef BNX2X_FREE
7200 }
7201
7202 static int bnx2x_alloc_mem(struct bnx2x *bp)
7203 {
7204
7205 #define BNX2X_PCI_ALLOC(x, y, size) \
7206         do { \
7207                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7208                 if (x == NULL) \
7209                         goto alloc_mem_err; \
7210                 memset(x, 0, size); \
7211         } while (0)
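/* Note: the explicit memset() is kept because dma_alloc_coherent() is
 * not guaranteed to return zeroed memory on every architecture.
 */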
7212
7213 #define BNX2X_ALLOC(x, size) \
7214         do { \
7215                 x = vmalloc(size); \
7216                 if (x == NULL) \
7217                         goto alloc_mem_err; \
7218                 memset(x, 0, size); \
7219         } while (0)
7220
7221         int i;
7222
7223         /* fastpath */
7224         /* Common */
7225         for_each_queue(bp, i) {
7226                 bnx2x_fp(bp, i, bp) = bp;
7227
7228                 /* status blocks */
7229                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7230                                 &bnx2x_fp(bp, i, status_blk_mapping),
7231                                 sizeof(struct host_status_block));
7232         }
7233         /* Rx */
7234         for_each_queue(bp, i) {
7235
7236                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7237                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7238                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7239                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7240                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7241                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7242
7243                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7244                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7245                                 sizeof(struct eth_fast_path_rx_cqe) *
7246                                 NUM_RCQ_BD);
7247
7248                 /* SGE ring */
7249                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7250                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7251                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7252                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7253                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7254         }
7255         /* Tx */
7256         for_each_queue(bp, i) {
7257
7258                 /* fastpath tx rings: tx_buf tx_desc */
7259                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7260                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7261                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7262                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7263                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7264         }
7265         /* end of fastpath */
7266
7267         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7268                         sizeof(struct host_def_status_block));
7269
7270         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7271                         sizeof(struct bnx2x_slowpath));
7272
7273 #ifdef BCM_CNIC
7274         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7275
7276         /* allocate searcher T2 table;
7277            we allocate 1/4 of the T1 allocation for T2
7278            (T2 is not entered into the ILT) */
7279         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7280
7281         /* Initialize T2 (for 1024 connections) */
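        /* Each 64-byte T2 entry ends (at offset 56) with the DMA
         * address of the next entry, chaining the table into the free
         * list the searcher walks.
         */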
7282         for (i = 0; i < 16*1024; i += 64)
7283                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7284
7285         /* Timer block array (8 bytes * MAX_CONN), phys uncached; for now 1024 conns */
7286         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7287
7288         /* QM queues (128*MAX_CONN) */
7289         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7290
7291         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7292                         sizeof(struct host_status_block));
7293 #endif
7294
7295         /* Slow path ring */
7296         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7297
7298         return 0;
7299
7300 alloc_mem_err:
7301         bnx2x_free_mem(bp);
7302         return -ENOMEM;
7303
7304 #undef BNX2X_PCI_ALLOC
7305 #undef BNX2X_ALLOC
7306 }
7307
7308 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7309 {
7310         int i;
7311
7312         for_each_queue(bp, i) {
7313                 struct bnx2x_fastpath *fp = &bp->fp[i];
7314
7315                 u16 bd_cons = fp->tx_bd_cons;
7316                 u16 sw_prod = fp->tx_pkt_prod;
7317                 u16 sw_cons = fp->tx_pkt_cons;
7318
7319                 while (sw_cons != sw_prod) {
7320                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7321                         sw_cons++;
7322                 }
7323         }
7324 }
7325
7326 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7327 {
7328         int i, j;
7329
7330         for_each_queue(bp, j) {
7331                 struct bnx2x_fastpath *fp = &bp->fp[j];
7332
7333                 for (i = 0; i < NUM_RX_BD; i++) {
7334                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7335                         struct sk_buff *skb = rx_buf->skb;
7336
7337                         if (skb == NULL)
7338                                 continue;
7339
7340                         dma_unmap_single(&bp->pdev->dev,
7341                                          dma_unmap_addr(rx_buf, mapping),
7342                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7343
7344                         rx_buf->skb = NULL;
7345                         dev_kfree_skb(skb);
7346                 }
7347                 if (!fp->disable_tpa)
7348                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7349                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7350                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7351         }
7352 }
7353
7354 static void bnx2x_free_skbs(struct bnx2x *bp)
7355 {
7356         bnx2x_free_tx_skbs(bp);
7357         bnx2x_free_rx_skbs(bp);
7358 }
7359
7360 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7361 {
7362         int i, offset = 1;
7363
7364         free_irq(bp->msix_table[0].vector, bp->dev);
7365         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7366            bp->msix_table[0].vector);
7367
7368 #ifdef BCM_CNIC
7369         offset++;
7370 #endif
7371         for_each_queue(bp, i) {
7372                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7373                    "state %x\n", i, bp->msix_table[i + offset].vector,
7374                    bnx2x_fp(bp, i, state));
7375
7376                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7377         }
7378 }
7379
7380 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7381 {
7382         if (bp->flags & USING_MSIX_FLAG) {
7383                 if (!disable_only)
7384                         bnx2x_free_msix_irqs(bp);
7385                 pci_disable_msix(bp->pdev);
7386                 bp->flags &= ~USING_MSIX_FLAG;
7387
7388         } else if (bp->flags & USING_MSI_FLAG) {
7389                 if (!disable_only)
7390                         free_irq(bp->pdev->irq, bp->dev);
7391                 pci_disable_msi(bp->pdev);
7392                 bp->flags &= ~USING_MSI_FLAG;
7393
7394         } else if (!disable_only)
7395                 free_irq(bp->pdev->irq, bp->dev);
7396 }
7397
7398 static int bnx2x_enable_msix(struct bnx2x *bp)
7399 {
7400         int i, rc, offset = 1;
7401         int igu_vec = 0;
7402
7403         bp->msix_table[0].entry = igu_vec;
7404         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7405
7406 #ifdef BCM_CNIC
7407         igu_vec = BP_L_ID(bp) + offset;
7408         bp->msix_table[1].entry = igu_vec;
7409         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7410         offset++;
7411 #endif
7412         for_each_queue(bp, i) {
7413                 igu_vec = BP_L_ID(bp) + offset + i;
7414                 bp->msix_table[i + offset].entry = igu_vec;
7415                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7416                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7417         }
7418
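        /* On success pci_enable_msix() fills msix_table[i].vector with
         * the allocated IRQ numbers for the .entry indices requested
         * above: one slowpath vector plus one per fastpath queue (and
         * one for CNIC when compiled in).
         */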
7419         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7420                              BNX2X_NUM_QUEUES(bp) + offset);
7421         if (rc) {
7422                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7423                 return rc;
7424         }
7425
7426         bp->flags |= USING_MSIX_FLAG;
7427
7428         return 0;
7429 }
7430
7431 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7432 {
7433         int i, rc, offset = 1;
7434
7435         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7436                          bp->dev->name, bp->dev);
7437         if (rc) {
7438                 BNX2X_ERR("request sp irq failed\n");
7439                 return -EBUSY;
7440         }
7441
7442 #ifdef BCM_CNIC
7443         offset++;
7444 #endif
7445         for_each_queue(bp, i) {
7446                 struct bnx2x_fastpath *fp = &bp->fp[i];
7447                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7448                          bp->dev->name, i);
7449
7450                 rc = request_irq(bp->msix_table[i + offset].vector,
7451                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7452                 if (rc) {
7453                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7454                         bnx2x_free_msix_irqs(bp);
7455                         return -EBUSY;
7456                 }
7457
7458                 fp->state = BNX2X_FP_STATE_IRQ;
7459         }
7460
7461         i = BNX2X_NUM_QUEUES(bp);
7462         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7463                " ... fp[%d] %d\n",
7464                bp->msix_table[0].vector,
7465                0, bp->msix_table[offset].vector,
7466                i - 1, bp->msix_table[offset + i - 1].vector);
7467
7468         return 0;
7469 }
7470
7471 static int bnx2x_enable_msi(struct bnx2x *bp)
7472 {
7473         int rc;
7474
7475         rc = pci_enable_msi(bp->pdev);
7476         if (rc) {
7477                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7478                 return -1;
7479         }
7480         bp->flags |= USING_MSI_FLAG;
7481
7482         return 0;
7483 }
7484
7485 static int bnx2x_req_irq(struct bnx2x *bp)
7486 {
7487         unsigned long flags;
7488         int rc;
7489
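        /* MSI vectors are never shared, so IRQF_SHARED is only needed
         * for legacy INTx */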
7490         if (bp->flags & USING_MSI_FLAG)
7491                 flags = 0;
7492         else
7493                 flags = IRQF_SHARED;
7494
7495         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7496                          bp->dev->name, bp->dev);
7497         if (!rc)
7498                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7499
7500         return rc;
7501 }
7502
7503 static void bnx2x_napi_enable(struct bnx2x *bp)
7504 {
7505         int i;
7506
7507         for_each_queue(bp, i)
7508                 napi_enable(&bnx2x_fp(bp, i, napi));
7509 }
7510
7511 static void bnx2x_napi_disable(struct bnx2x *bp)
7512 {
7513         int i;
7514
7515         for_each_queue(bp, i)
7516                 napi_disable(&bnx2x_fp(bp, i, napi));
7517 }
7518
7519 static void bnx2x_netif_start(struct bnx2x *bp)
7520 {
7521         int intr_sem;
7522
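        /* atomic_dec_and_test() returns true only when intr_sem drops
         * to 0, i.e. when this was the last outstanding
         * interrupt-disable request */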
7523         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7524         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7525
7526         if (intr_sem) {
7527                 if (netif_running(bp->dev)) {
7528                         bnx2x_napi_enable(bp);
7529                         bnx2x_int_enable(bp);
7530                         if (bp->state == BNX2X_STATE_OPEN)
7531                                 netif_tx_wake_all_queues(bp->dev);
7532                 }
7533         }
7534 }
7535
7536 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7537 {
7538         bnx2x_int_disable_sync(bp, disable_hw);
7539         bnx2x_napi_disable(bp);
7540         netif_tx_disable(bp->dev);
7541 }
7542
7543 /*
7544  * Init service functions
7545  */
7546
7547 /**
7548  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7549  *
7550  * @param bp driver descriptor
7551  * @param set set or clear an entry (1 or 0)
7552  * @param mac pointer to a buffer containing a MAC
7553  * @param cl_bit_vec bit vector of clients to register a MAC for
7554  * @param cam_offset offset in a CAM to use
7555  * @param with_bcast set broadcast MAC as well
7556  */
7557 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7558                                       u32 cl_bit_vec, u8 cam_offset,
7559                                       u8 with_bcast)
7560 {
7561         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7562         int port = BP_PORT(bp);
7563
7564         /* CAM allocation
7565          * unicasts 0-31:port0 32-63:port1
7566          * multicast 64-127:port0 128-191:port1
7567          */
7568         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7569         config->hdr.offset = cam_offset;
7570         config->hdr.client_id = 0xff;
7571         config->hdr.reserved1 = 0;
7572
7573         /* primary MAC */
7574         config->config_table[0].cam_entry.msb_mac_addr =
7575                                         swab16(*(u16 *)&mac[0]);
7576         config->config_table[0].cam_entry.middle_mac_addr =
7577                                         swab16(*(u16 *)&mac[2]);
7578         config->config_table[0].cam_entry.lsb_mac_addr =
7579                                         swab16(*(u16 *)&mac[4]);
7580         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7581         if (set)
7582                 config->config_table[0].target_table_entry.flags = 0;
7583         else
7584                 CAM_INVALIDATE(config->config_table[0]);
7585         config->config_table[0].target_table_entry.clients_bit_vector =
7586                                                 cpu_to_le32(cl_bit_vec);
7587         config->config_table[0].target_table_entry.vlan_id = 0;
7588
7589         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7590            (set ? "setting" : "clearing"),
7591            config->config_table[0].cam_entry.msb_mac_addr,
7592            config->config_table[0].cam_entry.middle_mac_addr,
7593            config->config_table[0].cam_entry.lsb_mac_addr);
7594
7595         /* broadcast */
7596         if (with_bcast) {
7597                 config->config_table[1].cam_entry.msb_mac_addr =
7598                         cpu_to_le16(0xffff);
7599                 config->config_table[1].cam_entry.middle_mac_addr =
7600                         cpu_to_le16(0xffff);
7601                 config->config_table[1].cam_entry.lsb_mac_addr =
7602                         cpu_to_le16(0xffff);
7603                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7604                 if (set)
7605                         config->config_table[1].target_table_entry.flags =
7606                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7607                 else
7608                         CAM_INVALIDATE(config->config_table[1]);
7609                 config->config_table[1].target_table_entry.clients_bit_vector =
7610                                                         cpu_to_le32(cl_bit_vec);
7611                 config->config_table[1].target_table_entry.vlan_id = 0;
7612         }
7613
7614         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7615                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7616                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7617 }
7618
7619 /**
7620  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7621  *
7622  * @param bp driver descriptor
7623  * @param set set or clear an entry (1 or 0)
7624  * @param mac pointer to a buffer containing a MAC
7625  * @param cl_bit_vec bit vector of clients to register a MAC for
7626  * @param cam_offset offset in a CAM to use
7627  */
7628 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7629                                        u32 cl_bit_vec, u8 cam_offset)
7630 {
7631         struct mac_configuration_cmd_e1h *config =
7632                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7633
7634         config->hdr.length = 1;
7635         config->hdr.offset = cam_offset;
7636         config->hdr.client_id = 0xff;
7637         config->hdr.reserved1 = 0;
7638
7639         /* primary MAC */
7640         config->config_table[0].msb_mac_addr =
7641                                         swab16(*(u16 *)&mac[0]);
7642         config->config_table[0].middle_mac_addr =
7643                                         swab16(*(u16 *)&mac[2]);
7644         config->config_table[0].lsb_mac_addr =
7645                                         swab16(*(u16 *)&mac[4]);
7646         config->config_table[0].clients_bit_vector =
7647                                         cpu_to_le32(cl_bit_vec);
7648         config->config_table[0].vlan_id = 0;
7649         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7650         if (set)
7651                 config->config_table[0].flags = BP_PORT(bp);
7652         else
7653                 config->config_table[0].flags =
7654                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7655
7656         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7657            (set ? "setting" : "clearing"),
7658            config->config_table[0].msb_mac_addr,
7659            config->config_table[0].middle_mac_addr,
7660            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7661
7662         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7663                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7664                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7665 }
7666
7667 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7668                              int *state_p, int poll)
7669 {
7670         /* can take a while if any port is running */
7671         int cnt = 5000;
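        /* worst case ~5 seconds: 5000 iterations of msleep(1) below */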
7672
7673         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7674            poll ? "polling" : "waiting", state, idx);
7675
7676         might_sleep();
7677         while (cnt--) {
7678                 if (poll) {
7679                         bnx2x_rx_int(bp->fp, 10);
7680                         /* if the index is different from 0,
7681                          * the reply for some commands will
7682                          * be on the non-default queue
7683                          */
7684                         if (idx)
7685                                 bnx2x_rx_int(&bp->fp[idx], 10);
7686                 }
7687
7688                 mb(); /* state is changed by bnx2x_sp_event() */
7689                 if (*state_p == state) {
7690 #ifdef BNX2X_STOP_ON_ERROR
7691                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7692 #endif
7693                         return 0;
7694                 }
7695
7696                 msleep(1);
7697
7698                 if (bp->panic)
7699                         return -EIO;
7700         }
7701
7702         /* timeout! */
7703         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7704                   poll ? "polling" : "waiting", state, idx);
7705 #ifdef BNX2X_STOP_ON_ERROR
7706         bnx2x_panic();
7707 #endif
7708
7709         return -EBUSY;
7710 }
7711
7712 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7713 {
7714         bp->set_mac_pending++;
7715         smp_wmb();
7716
7717         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7718                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7719
7720         /* Wait for a completion */
7721         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7722 }
7723
7724 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7725 {
7726         bp->set_mac_pending++;
7727         smp_wmb();
7728
7729         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7730                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7731                                   1);
7732
7733         /* Wait for a completion */
7734         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7735 }
7736
7737 #ifdef BCM_CNIC
7738 /**
7739  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7740  * MAC(s). This function will wait until the ramrod completion
7741  * returns.
7742  *
7743  * @param bp driver handle
7744  * @param set set or clear the CAM entry
7745  *
7746  * @return 0 if success, -ENODEV if the ramrod doesn't return.
7747  */
7748 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7749 {
7750         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7751
7752         bp->set_mac_pending++;
7753         smp_wmb();
7754
7755         /* Send a SET_MAC ramrod */
7756         if (CHIP_IS_E1(bp))
7757                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7758                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7759                                   1);
7760         else
7761                 /* CAM allocation for E1H
7762                  * unicasts: by func number
7763                  * multicast: 20+FUNC*20, 20 each
7764                  */
7765                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7766                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7767
7768         /* Wait for a completion when setting */
7769         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7770
7771         return 0;
7772 }
7773 #endif
7774
7775 static int bnx2x_setup_leading(struct bnx2x *bp)
7776 {
7777         int rc;
7778
7779         /* reset IGU state */
7780         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7781
7782         /* SETUP ramrod */
7783         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7784
7785         /* Wait for completion */
7786         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7787
7788         return rc;
7789 }
7790
7791 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7792 {
7793         struct bnx2x_fastpath *fp = &bp->fp[index];
7794
7795         /* reset IGU state */
7796         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7797
7798         /* SETUP ramrod */
7799         fp->state = BNX2X_FP_STATE_OPENING;
7800         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7801                       fp->cl_id, 0);
7802
7803         /* Wait for completion */
7804         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7805                                  &(fp->state), 0);
7806 }
7807
7808 static int bnx2x_poll(struct napi_struct *napi, int budget);
7809
7810 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7811 {
7812
7813         switch (bp->multi_mode) {
7814         case ETH_RSS_MODE_DISABLED:
7815                 bp->num_queues = 1;
7816                 break;
7817
7818         case ETH_RSS_MODE_REGULAR:
7819                 if (num_queues)
7820                         bp->num_queues = min_t(u32, num_queues,
7821                                                   BNX2X_MAX_QUEUES(bp));
7822                 else
7823                         bp->num_queues = min_t(u32, num_online_cpus(),
7824                                                   BNX2X_MAX_QUEUES(bp));
7825                 break;
7826
7827
7828         default:
7829                 bp->num_queues = 1;
7830                 break;
7831         }
7832 }
7833
7834 static int bnx2x_set_num_queues(struct bnx2x *bp)
7835 {
7836         int rc = 0;
7837
7838         switch (int_mode) {
7839         case INT_MODE_INTx:
7840         case INT_MODE_MSI:
7841                 bp->num_queues = 1;
7842                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7843                 break;
7844
7845         case INT_MODE_MSIX:
7846         default:
7847                 /* Set number of queues according to bp->multi_mode value */
7848                 bnx2x_set_num_queues_msix(bp);
7849
7850                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7851                    bp->num_queues);
7852
7853                 /* if we can't use MSI-X we only need one fp,
7854                  * so try to enable MSI-X with the requested number of fp's
7855                  * and fallback to MSI or legacy INTx with one fp
7856                  */
7857                 rc = bnx2x_enable_msix(bp);
7858                 if (rc)
7859                         /* failed to enable MSI-X */
7860                         bp->num_queues = 1;
7861                 break;
7862         }
7863         bp->dev->real_num_tx_queues = bp->num_queues;
7864         return rc;
7865 }
7866
7867 #ifdef BCM_CNIC
7868 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7869 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7870 #endif
7871
7872 /* must be called with rtnl_lock */
7873 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7874 {
7875         u32 load_code;
7876         int i, rc;
7877
7878 #ifdef BNX2X_STOP_ON_ERROR
7879         if (unlikely(bp->panic))
7880                 return -EPERM;
7881 #endif
7882
7883         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7884
7885         rc = bnx2x_set_num_queues(bp);
7886
7887         if (bnx2x_alloc_mem(bp)) {
7888                 bnx2x_free_irq(bp, true);
7889                 return -ENOMEM;
7890         }
7891
7892         for_each_queue(bp, i)
7893                 bnx2x_fp(bp, i, disable_tpa) =
7894                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7895
7896         for_each_queue(bp, i)
7897                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7898                                bnx2x_poll, 128);
7899
7900         bnx2x_napi_enable(bp);
7901
7902         if (bp->flags & USING_MSIX_FLAG) {
7903                 rc = bnx2x_req_msix_irqs(bp);
7904                 if (rc) {
7905                         bnx2x_free_irq(bp, true);
7906                         goto load_error1;
7907                 }
7908         } else {
7909                 /* Fall back to INTx if we failed to enable MSI-X due
7910                    to lack of memory (in bnx2x_set_num_queues()) */
7911                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7912                         bnx2x_enable_msi(bp);
7913                 bnx2x_ack_int(bp);
7914                 rc = bnx2x_req_irq(bp);
7915                 if (rc) {
7916                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7917                         bnx2x_free_irq(bp, true);
7918                         goto load_error1;
7919                 }
7920                 if (bp->flags & USING_MSI_FLAG) {
7921                         bp->dev->irq = bp->pdev->irq;
7922                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7923                                     bp->pdev->irq);
7924                 }
7925         }
7926
7927         /* Send LOAD_REQUEST command to MCP.
7928            Returns the type of LOAD command: if this is the first
7929            port to be initialized, the common blocks should be
7930            initialized as well; otherwise they should not
7931         */
7932         if (!BP_NOMCP(bp)) {
7933                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7934                 if (!load_code) {
7935                         BNX2X_ERR("MCP response failure, aborting\n");
7936                         rc = -EBUSY;
7937                         goto load_error2;
7938                 }
7939                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7940                         rc = -EBUSY; /* other port in diagnostic mode */
7941                         goto load_error2;
7942                 }
7943
7944         } else {
7945                 int port = BP_PORT(bp);
7946
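                /* Without an MCP the driver keeps its own load counts:
                 * load_count[0] counts all functions, load_count[1+port]
                 * counts the functions on this port, so the first load
                 * overall does COMMON init and the first load on a port
                 * does PORT init.
                 */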
7947                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7948                    load_count[0], load_count[1], load_count[2]);
7949                 load_count[0]++;
7950                 load_count[1 + port]++;
7951                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7952                    load_count[0], load_count[1], load_count[2]);
7953                 if (load_count[0] == 1)
7954                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7955                 else if (load_count[1 + port] == 1)
7956                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7957                 else
7958                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7959         }
7960
7961         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7962             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7963                 bp->port.pmf = 1;
7964         else
7965                 bp->port.pmf = 0;
7966         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7967
7968         /* Initialize HW */
7969         rc = bnx2x_init_hw(bp, load_code);
7970         if (rc) {
7971                 BNX2X_ERR("HW init failed, aborting\n");
7972                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7973                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7974                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7975                 goto load_error2;
7976         }
7977
7978         /* Setup NIC internals and enable interrupts */
7979         bnx2x_nic_init(bp, load_code);
7980
7981         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7982             (bp->common.shmem2_base))
7983                 SHMEM2_WR(bp, dcc_support,
7984                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7985                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7986
7987         /* Send LOAD_DONE command to MCP */
7988         if (!BP_NOMCP(bp)) {
7989                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7990                 if (!load_code) {
7991                         BNX2X_ERR("MCP response failure, aborting\n");
7992                         rc = -EBUSY;
7993                         goto load_error3;
7994                 }
7995         }
7996
7997         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7998
7999         rc = bnx2x_setup_leading(bp);
8000         if (rc) {
8001                 BNX2X_ERR("Setup leading failed!\n");
8002 #ifndef BNX2X_STOP_ON_ERROR
8003                 goto load_error3;
8004 #else
8005                 bp->panic = 1;
8006                 return -EBUSY;
8007 #endif
8008         }
8009
8010         if (CHIP_IS_E1H(bp))
8011                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8012                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8013                         bp->flags |= MF_FUNC_DIS;
8014                 }
8015
8016         if (bp->state == BNX2X_STATE_OPEN) {
8017 #ifdef BCM_CNIC
8018                 /* Enable Timer scan */
8019                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8020 #endif
8021                 for_each_nondefault_queue(bp, i) {
8022                         rc = bnx2x_setup_multi(bp, i);
8023                         if (rc)
8024 #ifdef BCM_CNIC
8025                                 goto load_error4;
8026 #else
8027                                 goto load_error3;
8028 #endif
8029                 }
8030
8031                 if (CHIP_IS_E1(bp))
8032                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8033                 else
8034                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8035 #ifdef BCM_CNIC
8036                 /* Set iSCSI L2 MAC */
8037                 mutex_lock(&bp->cnic_mutex);
8038                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8039                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8040                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8041                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8042                                       CNIC_SB_ID(bp));
8043                 }
8044                 mutex_unlock(&bp->cnic_mutex);
8045 #endif
8046         }
8047
8048         if (bp->port.pmf)
8049                 bnx2x_initial_phy_init(bp, load_mode);
8050
8051         /* Start fast path */
8052         switch (load_mode) {
8053         case LOAD_NORMAL:
8054                 if (bp->state == BNX2X_STATE_OPEN) {
8055                         /* Tx queue should be only reenabled */
8056                         netif_tx_wake_all_queues(bp->dev);
8057                 }
8058                 /* Initialize the receive filter. */
8059                 bnx2x_set_rx_mode(bp->dev);
8060                 break;
8061
8062         case LOAD_OPEN:
8063                 netif_tx_start_all_queues(bp->dev);
8064                 if (bp->state != BNX2X_STATE_OPEN)
8065                         netif_tx_disable(bp->dev);
8066                 /* Initialize the receive filter. */
8067                 bnx2x_set_rx_mode(bp->dev);
8068                 break;
8069
8070         case LOAD_DIAG:
8071                 /* Initialize the receive filter. */
8072                 bnx2x_set_rx_mode(bp->dev);
8073                 bp->state = BNX2X_STATE_DIAG;
8074                 break;
8075
8076         default:
8077                 break;
8078         }
8079
8080         if (!bp->port.pmf)
8081                 bnx2x__link_status_update(bp);
8082
8083         /* start the timer */
8084         mod_timer(&bp->timer, jiffies + bp->current_interval);
8085
8086 #ifdef BCM_CNIC
8087         bnx2x_setup_cnic_irq_info(bp);
8088         if (bp->state == BNX2X_STATE_OPEN)
8089                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8090 #endif
8091         bnx2x_inc_load_cnt(bp);
8092
8093         return 0;
8094
8095 #ifdef BCM_CNIC
8096 load_error4:
8097         /* Disable Timer scan */
8098         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8099 #endif
8100 load_error3:
8101         bnx2x_int_disable_sync(bp, 1);
8102         if (!BP_NOMCP(bp)) {
8103                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8104                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8105         }
8106         bp->port.pmf = 0;
8107         /* Free SKBs, SGEs, TPA pool and driver internals */
8108         bnx2x_free_skbs(bp);
8109         for_each_queue(bp, i)
8110                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8111 load_error2:
8112         /* Release IRQs */
8113         bnx2x_free_irq(bp, false);
8114 load_error1:
8115         bnx2x_napi_disable(bp);
8116         for_each_queue(bp, i)
8117                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8118         bnx2x_free_mem(bp);
8119
8120         return rc;
8121 }
8122
8123 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8124 {
8125         struct bnx2x_fastpath *fp = &bp->fp[index];
8126         int rc;
8127
8128         /* halt the connection */
8129         fp->state = BNX2X_FP_STATE_HALTING;
8130         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8131
8132         /* Wait for completion */
8133         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8134                                &(fp->state), 1);
8135         if (rc) /* timeout */
8136                 return rc;
8137
8138         /* delete cfc entry */
8139         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8140
8141         /* Wait for completion */
8142         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8143                                &(fp->state), 1);
8144         return rc;
8145 }
8146
8147 static int bnx2x_stop_leading(struct bnx2x *bp)
8148 {
8149         __le16 dsb_sp_prod_idx;
8150         /* if the other port is handling traffic,
8151            this can take a lot of time */
8152         int cnt = 500;
8153         int rc;
8154
8155         might_sleep();
8156
8157         /* Send HALT ramrod */
8158         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8159         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8160
8161         /* Wait for completion */
8162         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8163                                &(bp->fp[0].state), 1);
8164         if (rc) /* timeout */
8165                 return rc;
8166
8167         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8168
8169         /* Send PORT_DELETE ramrod */
8170         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8171
8172         /* Wait for completion to arrive on the default status block.
8173            We are going to reset the chip anyway, so there is not
8174            much to do if this times out
8175          */
8176         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8177                 if (!cnt) {
8178                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8179                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8180                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8181 #ifdef BNX2X_STOP_ON_ERROR
8182                         bnx2x_panic();
8183 #endif
8184                         rc = -EBUSY;
8185                         break;
8186                 }
8187                 cnt--;
8188                 msleep(1);
8189                 rmb(); /* Refresh the dsb_sp_prod */
8190         }
8191         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8192         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8193
8194         return rc;
8195 }
8196
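/* Function-level reset: mask the HC leading/trailing edge attentions,
 * stop the CNIC timer scan (when compiled in) and clear this function's
 * ILT entries.
 */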
8197 static void bnx2x_reset_func(struct bnx2x *bp)
8198 {
8199         int port = BP_PORT(bp);
8200         int func = BP_FUNC(bp);
8201         int base, i;
8202
8203         /* Configure IGU */
8204         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8205         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8206
8207 #ifdef BCM_CNIC
8208         /* Disable Timer scan */
8209         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8210         /*
8211          * Wait for at least 10ms and up to 2 seconds for the timers scan to
8212          * complete
8213          */
8214         for (i = 0; i < 200; i++) {
8215                 msleep(10);
8216                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8217                         break;
8218         }
8219 #endif
8220         /* Clear ILT */
8221         base = FUNC_ILT_BASE(func);
8222         for (i = base; i < base + ILT_PER_FUNC; i++)
8223                 bnx2x_ilt_wr(bp, i, 0);
8224 }
8225
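/* Port-level reset: mask NIG and AEU attentions, stop receiving packets
 * into the BRB and verify that the BRB has drained.
 */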
8226 static void bnx2x_reset_port(struct bnx2x *bp)
8227 {
8228         int port = BP_PORT(bp);
8229         u32 val;
8230
8231         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8232
8233         /* Do not rcv packets to BRB */
8234         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8235         /* Do not direct rcv packets that are not for MCP to the BRB */
8236         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8237                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8238
8239         /* Configure AEU */
8240         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8241
8242         msleep(100);
8243         /* Check for BRB port occupancy */
8244         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8245         if (val)
8246                 DP(NETIF_MSG_IFDOWN,
8247                    "BRB1 is not empty, %d blocks are occupied\n", val);
8248
8249         /* TODO: Close Doorbell port? */
8250 }
8251
8252 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8253 {
8254         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8255            BP_FUNC(bp), reset_code);
8256
8257         switch (reset_code) {
8258         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8259                 bnx2x_reset_port(bp);
8260                 bnx2x_reset_func(bp);
8261                 bnx2x_reset_common(bp);
8262                 break;
8263
8264         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8265                 bnx2x_reset_port(bp);
8266                 bnx2x_reset_func(bp);
8267                 break;
8268
8269         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8270                 bnx2x_reset_func(bp);
8271                 break;
8272
8273         default:
8274                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8275                 break;
8276         }
8277 }
8278
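/* Graceful chip shutdown: drain the Tx queues, invalidate the MAC
 * configuration (E1 CAM or E1H LLH), optionally arm WoL, close all
 * connections, negotiate the reset scope with the MCP (or via the local
 * load counts when there is no MCP) and reset the chip.
 */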
8279 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8280 {
8281         int port = BP_PORT(bp);
8282         u32 reset_code = 0;
8283         int i, cnt, rc;
8284
8285         /* Wait until tx fastpath tasks complete */
8286         for_each_queue(bp, i) {
8287                 struct bnx2x_fastpath *fp = &bp->fp[i];
8288
8289                 cnt = 1000;
8290                 while (bnx2x_has_tx_work_unload(fp)) {
8291
8292                         bnx2x_tx_int(fp);
8293                         if (!cnt) {
8294                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8295                                           i);
8296 #ifdef BNX2X_STOP_ON_ERROR
8297                                 bnx2x_panic();
8298                                 return; /* void function - can't return -EBUSY */
8299 #else
8300                                 break;
8301 #endif
8302                         }
8303                         cnt--;
8304                         msleep(1);
8305                 }
8306         }
8307         /* Give HW time to discard old tx messages */
8308         msleep(1);
8309
8310         if (CHIP_IS_E1(bp)) {
8311                 struct mac_configuration_cmd *config =
8312                                                 bnx2x_sp(bp, mcast_config);
8313
8314                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8315
8316                 for (i = 0; i < config->hdr.length; i++)
8317                         CAM_INVALIDATE(config->config_table[i]);
8318
8319                 config->hdr.length = i;
8320                 if (CHIP_REV_IS_SLOW(bp))
8321                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8322                 else
8323                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8324                 config->hdr.client_id = bp->fp->cl_id;
8325                 config->hdr.reserved1 = 0;
8326
8327                 bp->set_mac_pending++;
8328                 smp_wmb();
8329
8330                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8331                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8332                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8333
8334         } else { /* E1H */
8335                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8336
8337                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8338
8339                 for (i = 0; i < MC_HASH_SIZE; i++)
8340                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8341
8342                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8343         }
8344 #ifdef BCM_CNIC
8345         /* Clear iSCSI L2 MAC */
8346         mutex_lock(&bp->cnic_mutex);
8347         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8348                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8349                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8350         }
8351         mutex_unlock(&bp->cnic_mutex);
8352 #endif
8353
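        /* Select the unload request for the MCP: a normal unload, an
         * unload with MCP-handled WoL, or an unload with the driver-armed
         * WoL filter that is programmed below.
         */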
8354         if (unload_mode == UNLOAD_NORMAL)
8355                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8356
8357         else if (bp->flags & NO_WOL_FLAG)
8358                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8359
8360         else if (bp->wol) {
8361                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8362                 u8 *mac_addr = bp->dev->dev_addr;
8363                 u32 val;
8364                 /* The mac address is written to entries 1-4 to
8365                    preserve entry 0 which is used by the PMF */
8366                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8367
8368                 val = (mac_addr[0] << 8) | mac_addr[1];
8369                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8370
8371                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8372                       (mac_addr[4] << 8) | mac_addr[5];
8373                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8374
8375                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8376
8377         } else
8378                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8379
8380         /* Close multi and leading connections.
8381            Completions for ramrods are collected in a synchronous way */
8382         for_each_nondefault_queue(bp, i)
8383                 if (bnx2x_stop_multi(bp, i))
8384                         goto unload_error;
8385
8386         rc = bnx2x_stop_leading(bp);
8387         if (rc) {
8388                 BNX2X_ERR("Stop leading failed!\n");
8389 #ifdef BNX2X_STOP_ON_ERROR
8390                 return; /* void function - can't return -EBUSY */
8391 #else
8392                 goto unload_error;
8393 #endif
8394         }
8395
8396 unload_error:
8397         if (!BP_NOMCP(bp))
8398                 reset_code = bnx2x_fw_command(bp, reset_code);
8399         else {
8400                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8401                    load_count[0], load_count[1], load_count[2]);
8402                 load_count[0]--;
8403                 load_count[1 + port]--;
8404                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8405                    load_count[0], load_count[1], load_count[2]);
8406                 if (load_count[0] == 0)
8407                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8408                 else if (load_count[1 + port] == 0)
8409                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8410                 else
8411                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8412         }
8413
8414         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8415             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8416                 bnx2x__link_reset(bp);
8417
8418         /* Reset the chip */
8419         bnx2x_reset_chip(bp, reset_code);
8420
8421         /* Report UNLOAD_DONE to MCP */
8422         if (!BP_NOMCP(bp))
8423                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8424
8425 }
8426
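/* Disable the "close the gates" mechanism by clearing the relevant AEU
 * mask bits: 0x300 in MISC_REG_AEU_MASK_ATTN_FUNC_x on E1, the PXP/NIG
 * close masks in MISC_REG_AEU_GENERAL_MASK on E1H.
 */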
8427 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8428 {
8429         u32 val;
8430
8431         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8432
8433         if (CHIP_IS_E1(bp)) {
8434                 int port = BP_PORT(bp);
8435                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8436                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8437
8438                 val = REG_RD(bp, addr);
8439                 val &= ~(0x300);
8440                 REG_WR(bp, addr, val);
8441         } else if (CHIP_IS_E1H(bp)) {
8442                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8443                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8444                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8445                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8446         }
8447 }
8448
8449 /* must be called with rtnl_lock */
8450 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8451 {
8452         int i;
8453
8454         if (bp->state == BNX2X_STATE_CLOSED) {
8455                 /* Interface has been removed - nothing to recover */
8456                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8457                 bp->is_leader = 0;
8458                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8459                 smp_wmb();
8460
8461                 return -EINVAL;
8462         }
8463
8464 #ifdef BCM_CNIC
8465         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8466 #endif
8467         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8468
8469         /* Set "drop all" */
8470         bp->rx_mode = BNX2X_RX_MODE_NONE;
8471         bnx2x_set_storm_rx_mode(bp);
8472
8473         /* Disable HW interrupts, NAPI and Tx */
8474         bnx2x_netif_stop(bp, 1);
8475
8476         del_timer_sync(&bp->timer);
8477         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8478                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8479         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8480
8481         /* Release IRQs */
8482         bnx2x_free_irq(bp, false);
8483
8484         /* Cleanup the chip if needed */
8485         if (unload_mode != UNLOAD_RECOVERY)
8486                 bnx2x_chip_cleanup(bp, unload_mode);
8487
8488         bp->port.pmf = 0;
8489
8490         /* Free SKBs, SGEs, TPA pool and driver internals */
8491         bnx2x_free_skbs(bp);
8492         for_each_queue(bp, i)
8493                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8494         for_each_queue(bp, i)
8495                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8496         bnx2x_free_mem(bp);
8497
8498         bp->state = BNX2X_STATE_CLOSED;
8499
8500         netif_carrier_off(bp->dev);
8501
8502         /* The last driver must disable the "close the gate" functionality
8503          * if there is no parity attention or "process kill" pending.
8504          */
8505         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8506             bnx2x_reset_is_done(bp))
8507                 bnx2x_disable_close_the_gate(bp);
8508
8509         /* Reset the MCP mailbox sequence if a recovery is in progress */
8510         if (unload_mode == UNLOAD_RECOVERY)
8511                 bp->fw_seq = 0;
8512
8513         return 0;
8514 }
8515
8516 /* Close gates #2, #3 and #4: */
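/* Gate #4 (doorbells) is controlled via PXP_REG_HST_DISCARD_DOORBELLS,
 * gate #2 via PXP_REG_HST_DISCARD_INTERNAL_WRITES and gate #3 via bit 0
 * of the per-port HC_REG_CONFIG register.
 */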
8517 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8518 {
8519         u32 val, addr;
8520
8521         /* Gates #2 and #4a are closed/opened for "not E1" only */
8522         if (!CHIP_IS_E1(bp)) {
8523                 /* #4 */
8524                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8525                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8526                        close ? (val | 0x1) : (val & (~(u32)1)));
8527                 /* #2 */
8528                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8529                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8530                        close ? (val | 0x1) : (val & (~(u32)1)));
8531         }
8532
8533         /* #3 */
8534         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8535         val = REG_RD(bp, addr);
8536         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8537
8538         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8539                 close ? "closing" : "opening");
8540         mmiowb();
8541 }
8542
8543 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8544
8545 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8546 {
8547         /* Save the current value of the `magic' bit and then set it */
8548         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8549         *magic_val = val & SHARED_MF_CLP_MAGIC;
8550         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8551 }
8552
8553 /* Restore the value of the `magic' bit.
8554  *
8555  * @param bp Device handle.
8556  * @param magic_val Old value of the `magic' bit.
8557  */
8558 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8559 {
8560         /* Restore the `magic' bit value... */
8564         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8565         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8566                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8567 }
8568
8569 /* Prepares for MCP reset: takes care of CLP configurations.
8570  *
8571  * @param bp
8572  * @param magic_val Old value of 'magic' bit.
8573  */
8574 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8575 {
8576         u32 shmem;
8577         u32 validity_offset;
8578
8579         DP(NETIF_MSG_HW, "Starting\n");
8580
8581         /* Set `magic' bit in order to save MF config */
8582         if (!CHIP_IS_E1(bp))
8583                 bnx2x_clp_reset_prep(bp, magic_val);
8584
8585         /* Get shmem offset */
8586         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8587         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8588
8589         /* Clear validity map flags */
8590         if (shmem > 0)
8591                 REG_WR(bp, shmem + validity_offset, 0);
8592 }
8593
8594 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8595 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
8596
8597 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8598  * depending on the HW type.
8599  *
8600  * @param bp
8601  */
8602 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8603 {
8604         /* special handling for emulation and FPGA,
8605            wait 10 times longer */
8606         if (CHIP_REV_IS_SLOW(bp))
8607                 msleep(MCP_ONE_TIMEOUT*10);
8608         else
8609                 msleep(MCP_ONE_TIMEOUT);
8610 }
8611
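/* Wait for the MCP to come back up after a reset: poll the shmem
 * validity map until both the DEV_INFO and MB signatures appear, or
 * give up after MCP_TIMEOUT.
 */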
8612 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8613 {
8614         u32 shmem, cnt, validity_offset, val;
8615         int rc = 0;
8616
8617         msleep(100);
8618
8619         /* Get shmem offset */
8620         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8621         if (shmem == 0) {
8622                 BNX2X_ERR("Shmem base address is 0 - MCP is not accessible\n");
8623                 rc = -ENOTTY;
8624                 goto exit_lbl;
8625         }
8626
8627         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8628
8629         /* Wait for MCP to come up */
8630         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8631                 /* TBD: it's best to check the validity map of the last
8632                  * port; currently this checks port 0.
8633                  */
8634                 val = REG_RD(bp, shmem + validity_offset);
8635                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8636                    shmem + validity_offset, val);
8637
8638                 /* check that shared memory is valid. */
8639                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8640                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8641                         break;
8642
8643                 bnx2x_mcp_wait_one(bp);
8644         }
8645
8646         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8647
8648         /* Check that shared memory is valid. This indicates that MCP is up. */
8649         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8650             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8651                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8652                 rc = -ENOTTY;
8653                 goto exit_lbl;
8654         }
8655
8656 exit_lbl:
8657         /* Restore the `magic' bit value */
8658         if (!CHIP_IS_E1(bp))
8659                 bnx2x_clp_reset_done(bp, magic_val);
8660
8661         return rc;
8662 }
8663
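/* Prepare the PXP2 block for reset: clear the read-path start-init and
 * RQ done indications (nothing to do on E1).
 */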
8664 static void bnx2x_pxp_prep(struct bnx2x *bp)
8665 {
8666         if (!CHIP_IS_E1(bp)) {
8667                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8668                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8669                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8670                 mmiowb();
8671         }
8672 }
8673
8674 /*
8675  * Reset the whole chip except for:
8676  *      - PCIE core
8677  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8678  *              one reset bit)
8679  *      - IGU
8680  *      - MISC (including AEU)
8681  *      - GRC
8682  *      - RBCN, RBCP
8683  */
8684 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8685 {
8686         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8687
8688         not_reset_mask1 =
8689                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8690                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8691                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8692
8693         not_reset_mask2 =
8694                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8695                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8696                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8697                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8698                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8699                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8700                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8701                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8702
8703         reset_mask1 = 0xffffffff;
8704
8705         if (CHIP_IS_E1(bp))
8706                 reset_mask2 = 0xffff;
8707         else
8708                 reset_mask2 = 0x1ffff;
8709
8710         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8711                reset_mask1 & (~not_reset_mask1));
8712         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8713                reset_mask2 & (~not_reset_mask2));
8714
8715         barrier();
8716         mmiowb();
8717
8718         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8719         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8720         mmiowb();
8721 }
8722
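/* "Process kill" - full chip recovery: wait for the PXP Tetris buffer
 * to drain, close gates #2-#4, clear the "unprepared" bit, prepare the
 * MCP and PXP for reset, reset the chip, then wait for the MCP to come
 * back and reopen the gates.
 */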
8723 static int bnx2x_process_kill(struct bnx2x *bp)
8724 {
8725         int cnt = 1000;
8726         u32 val = 0;
8727         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8728
8729
8730         /* Empty the Tetris buffer, wait for 1s */
8731         do {
8732                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8733                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8734                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8735                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8736                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8737                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8738                     ((port_is_idle_0 & 0x1) == 0x1) &&
8739                     ((port_is_idle_1 & 0x1) == 0x1) &&
8740                     (pgl_exp_rom2 == 0xffffffff))
8741                         break;
8742                 msleep(1);
8743         } while (cnt-- > 0);
8744
8745         if (cnt < 0) { /* only a timeout leaves cnt at -1 */
8746                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
8747                           " are still"
8748                           " outstanding read requests after 1s!\n");
8749                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8750                           " port_is_idle_0=0x%08x,"
8751                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8752                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8753                           pgl_exp_rom2);
8754                 return -EAGAIN;
8755         }
8756
8757         barrier();
8758
8759         /* Close gates #2, #3 and #4 */
8760         bnx2x_set_234_gates(bp, true);
8761
8762         /* TBD: Indicate that "process kill" is in progress to MCP */
8763
8764         /* Clear "unprepared" bit */
8765         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8766         barrier();
8767
8768         /* Make sure all is written to the chip before the reset */
8769         mmiowb();
8770
8771         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8772          * PSWHST, GRC and PSWRD Tetris buffer.
8773          */
8774         msleep(1);
8775
8776         /* Prepare for chip reset: */
8777         /* MCP */
8778         bnx2x_reset_mcp_prep(bp, &val);
8779
8780         /* PXP */
8781         bnx2x_pxp_prep(bp);
8782         barrier();
8783
8784         /* reset the chip */
8785         bnx2x_process_kill_chip_reset(bp);
8786         barrier();
8787
8788         /* Recover after reset: */
8789         /* MCP */
8790         if (bnx2x_reset_mcp_comp(bp, val))
8791                 return -EAGAIN;
8792
8793         /* PXP */
8794         bnx2x_pxp_prep(bp);
8795
8796         /* Open the gates #2, #3 and #4 */
8797         bnx2x_set_234_gates(bp, false);
8798
8799         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8800          * reset state, re-enable attentions. */
8801
8802         return 0;
8803 }
8804
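/* Leader-only recovery step: run "process kill" and, on success, clear
 * the global "reset in progress" indication. Leadership and the HW lock
 * are released in either case.
 */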
8805 static int bnx2x_leader_reset(struct bnx2x *bp)
8806 {
8807         int rc = 0;
8808         /* Try to recover after the failure */
8809         if (bnx2x_process_kill(bp)) {
8810                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8811                        bp->dev->name);
8812                 rc = -EAGAIN;
8813                 goto exit_leader_reset;
8814         }
8815
8816         /* Clear "reset is in progress" bit and update the driver state */
8817         bnx2x_set_reset_done(bp);
8818         bp->recovery_state = BNX2X_RECOVERY_DONE;
8819
8820 exit_leader_reset:
8821         bp->is_leader = 0;
8822         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8823         smp_wmb();
8824         return rc;
8825 }
8826
8827 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8828
8829 /* Assumption: runs under rtnl lock. This together with the fact
8830  * that it's called only from bnx2x_reset_task() ensures that it
8831  * will never be called when netif_running(bp->dev) is false.
8832  */
8833 static void bnx2x_parity_recover(struct bnx2x *bp)
8834 {
8835         DP(NETIF_MSG_HW, "Handling parity\n");
8836         while (1) {
8837                 switch (bp->recovery_state) {
8838                 case BNX2X_RECOVERY_INIT:
8839                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8840                         /* Try to get a LEADER_LOCK HW lock */
8841                         if (bnx2x_trylock_hw_lock(bp,
8842                                 HW_LOCK_RESOURCE_RESERVED_08))
8843                                 bp->is_leader = 1;
8844
8845                         /* Stop the driver */
8846                         /* If the interface has been removed - return */
8847                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8848                                 return;
8849
8850                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8851                         /* Ensure "is_leader" and "recovery_state"
8852                          * updates are seen by other CPUs
8853                          */
8854                         smp_wmb();
8855                         break;
8856
8857                 case BNX2X_RECOVERY_WAIT:
8858                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8859                         if (bp->is_leader) {
8860                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8861                                 if (load_counter) {
8862                                         /* Wait until all other functions go
8863                                          * down.
8864                                          */
8865                                         schedule_delayed_work(&bp->reset_task,
8866                                                                 HZ/10);
8867                                         return;
8868                                 } else {
8869                                         /* All other functions are down -
8870                                          * try to bring the chip back to
8871                                          * normal. In any case it's an exit
8872                                          * point for a leader.
8873                                          */
8874                                         if (bnx2x_leader_reset(bp) ||
8875                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8876                                                 printk(KERN_ERR "%s: Recovery "
8877                                                 "has failed. Power cycle is "
8878                                                 "needed.\n", bp->dev->name);
8879                                                 /* Disconnect this device */
8880                                                 netif_device_detach(bp->dev);
8881                                                 /* Block ifup for all functions
8882                                                  * of this ASIC until
8883                                                  * "process kill" or power
8884                                                  * cycle.
8885                                                  */
8886                                                 bnx2x_set_reset_in_progress(bp);
8887                                                 /* Shut down the power */
8888                                                 bnx2x_set_power_state(bp,
8889                                                                 PCI_D3hot);
8890                                                 return;
8891                                         }
8892
8893                                         return;
8894                                 }
8895                         } else { /* non-leader */
8896                                 if (!bnx2x_reset_is_done(bp)) {
8897                                         /* Try to get a LEADER_LOCK HW lock,
8898                                          * since a former leader may have
8899                                          * been unloaded by the user or
8900                                          * released leadership for another
8901                                          * reason.
8902                                          */
8903                                         if (bnx2x_trylock_hw_lock(bp,
8904                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8905                                                 /* I'm a leader now! Restart a
8906                                                  * switch case.
8907                                                  */
8908                                                 bp->is_leader = 1;
8909                                                 break;
8910                                         }
8911
8912                                         schedule_delayed_work(&bp->reset_task,
8913                                                                 HZ/10);
8914                                         return;
8915
8916                                 } else { /* A leader has completed
8917                                           * the "process kill". It's an exit
8918                                           * point for a non-leader.
8919                                           */
8920                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8921                                         bp->recovery_state =
8922                                                 BNX2X_RECOVERY_DONE;
8923                                         smp_wmb();
8924                                         return;
8925                                 }
8926                         }
8927                 default:
8928                         return;
8929                 }
8930         }
8931 }
8932
8933 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8934  * scheduled on a general queue in order to prevent a deadlock.
8935  */
8936 static void bnx2x_reset_task(struct work_struct *work)
8937 {
8938         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8939
8940 #ifdef BNX2X_STOP_ON_ERROR
8941         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
8942                   " so the reset is not done to allow a debug dump.\n"
8943          KERN_ERR " You will need to reboot when done\n");
8944         return;
8945 #endif
8946
8947         rtnl_lock();
8948
8949         if (!netif_running(bp->dev))
8950                 goto reset_task_exit;
8951
8952         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8953                 bnx2x_parity_recover(bp);
8954         else {
8955                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8956                 bnx2x_nic_load(bp, LOAD_NORMAL);
8957         }
8958
8959 reset_task_exit:
8960         rtnl_unlock();
8961 }
8962
8963 /* end of nic load/unload */
8964
8965 /* ethtool_ops */
8966
8967 /*
8968  * Init service functions
8969  */
8970
8971 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8972 {
8973         switch (func) {
8974         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8975         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8976         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8977         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8978         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8979         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8980         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8981         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8982         default:
8983                 BNX2X_ERR("Unsupported function index: %d\n", func);
8984                 return (u32)(-1);
8985         }
8986 }
8987
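/* Disable interrupts on E1H while pretending to be function 0: writing
 * a function number to the PGL pretend register makes the following GRC
 * accesses act on behalf of that function.
 */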
8988 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8989 {
8990         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8991
8992         /* Flush all outstanding writes */
8993         mmiowb();
8994
8995         /* Pretend to be function 0 */
8996         REG_WR(bp, reg, 0);
8997         /* Flush the GRC transaction (in the chip) */
8998         new_val = REG_RD(bp, reg);
8999         if (new_val != 0) {
9000                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9001                           new_val);
9002                 BUG();
9003         }
9004
9005         /* From now we are in the "like-E1" mode */
9006         bnx2x_int_disable(bp);
9007
9008         /* Flush all outstanding writes */
9009         mmiowb();
9010
9011         /* Restore the original function settings */
9012         REG_WR(bp, reg, orig_func);
9013         new_val = REG_RD(bp, reg);
9014         if (new_val != orig_func) {
9015                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9016                           orig_func, new_val);
9017                 BUG();
9018         }
9019 }
9020
9021 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9022 {
9023         if (CHIP_IS_E1H(bp))
9024                 bnx2x_undi_int_disable_e1h(bp, func);
9025         else
9026                 bnx2x_int_disable(bp);
9027 }
9028
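/* If a pre-boot UNDI driver left the device initialized (the normal
 * doorbell CID offset reads 0x7), perform its unload handshake with the
 * MCP for both ports, quiesce the Rx path, reset the chip and restore
 * the NIG port-swap straps before this driver takes over.
 */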
9029 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9030 {
9031         u32 val;
9032
9033         /* Check if there is any driver already loaded */
9034         val = REG_RD(bp, MISC_REG_UNPREPARED);
9035         if (val == 0x1) {
9036                 /* Check if it is the UNDI driver:
9037                  * UNDI initializes the CID offset for the normal bell to 0x7
9038                  */
9039                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9040                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9041                 if (val == 0x7) {
9042                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9043                         /* save our func */
9044                         int func = BP_FUNC(bp);
9045                         u32 swap_en;
9046                         u32 swap_val;
9047
9048                         /* clear the UNDI indication */
9049                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9050
9051                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9052
9053                         /* try to unload UNDI on port 0 */
9054                         bp->func = 0;
9055                         bp->fw_seq =
9056                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9057                                 DRV_MSG_SEQ_NUMBER_MASK);
9058                         reset_code = bnx2x_fw_command(bp, reset_code);
9059
9060                         /* if UNDI is loaded on the other port */
9061                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9062
9063                                 /* send "DONE" for previous unload */
9064                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9065
9066                                 /* unload UNDI on port 1 */
9067                                 bp->func = 1;
9068                                 bp->fw_seq =
9069                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9070                                         DRV_MSG_SEQ_NUMBER_MASK);
9071                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9072
9073                                 bnx2x_fw_command(bp, reset_code);
9074                         }
9075
9076                         /* now it's safe to release the lock */
9077                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9078
9079                         bnx2x_undi_int_disable(bp, func);
9080
9081                         /* close input traffic and wait for it to drain */
9082                         /* Do not rcv packets to BRB */
9083                         REG_WR(bp,
9084                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9085                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9086                         /* Do not direct rcv packets that are not for MCP to
9087                          * the BRB */
9088                         REG_WR(bp,
9089                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9090                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9091                         /* clear AEU */
9092                         REG_WR(bp,
9093                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9094                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9095                         msleep(10);
9096
9097                         /* save NIG port swap info */
9098                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9099                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9100                         /* reset device */
9101                         REG_WR(bp,
9102                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9103                                0xd3ffffff);
9104                         REG_WR(bp,
9105                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9106                                0x1403);
9107                         /* take the NIG out of reset and restore swap values */
9108                         REG_WR(bp,
9109                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9110                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9111                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9112                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9113
9114                         /* send unload done to the MCP */
9115                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9116
9117                         /* restore our func and fw_seq */
9118                         bp->func = func;
9119                         bp->fw_seq =
9120                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9121                                 DRV_MSG_SEQ_NUMBER_MASK);
9122
9123                 } else
9124                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9125         }
9126 }
9127
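/* Read the chip identification, flash size and shmem base addresses,
 * validate the MCP and its bootcode version, and determine the WoL
 * capability and board part number.
 */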
9128 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9129 {
9130         u32 val, val2, val3, val4, id;
9131         u16 pmc;
9132
9133         /* Get the chip revision id and number. */
9134         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9135         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9136         id = ((val & 0xffff) << 16);
9137         val = REG_RD(bp, MISC_REG_CHIP_REV);
9138         id |= ((val & 0xf) << 12);
9139         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9140         id |= ((val & 0xff) << 4);
9141         val = REG_RD(bp, MISC_REG_BOND_ID);
9142         id |= (val & 0xf);
9143         bp->common.chip_id = id;
9144         bp->link_params.chip_id = bp->common.chip_id;
9145         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9146
9147         val = (REG_RD(bp, 0x2874) & 0x55);
9148         if ((bp->common.chip_id & 0x1) ||
9149             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9150                 bp->flags |= ONE_PORT_FLAG;
9151                 BNX2X_DEV_INFO("single port device\n");
9152         }
9153
9154         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9155         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9156                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9157         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9158                        bp->common.flash_size, bp->common.flash_size);
9159
9160         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9161         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9162         bp->link_params.shmem_base = bp->common.shmem_base;
9163         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9164                        bp->common.shmem_base, bp->common.shmem2_base);
9165
9166         if (!bp->common.shmem_base ||
9167             (bp->common.shmem_base < 0xA0000) ||
9168             (bp->common.shmem_base >= 0xC0000)) {
9169                 BNX2X_DEV_INFO("MCP not active\n");
9170                 bp->flags |= NO_MCP_FLAG;
9171                 return;
9172         }
9173
9174         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9175         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9176                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9177                 BNX2X_ERROR("BAD MCP validity signature\n");
9178
9179         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9180         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9181
9182         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9183                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9184                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9185
9186         bp->link_params.feature_config_flags = 0;
9187         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9188         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9189                 bp->link_params.feature_config_flags |=
9190                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9191         else
9192                 bp->link_params.feature_config_flags &=
9193                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9194
9195         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9196         bp->common.bc_ver = val;
9197         BNX2X_DEV_INFO("bc_ver %X\n", val);
9198         if (val < BNX2X_BC_VER) {
9199                 /* for now only warn;
9200                  * later we might need to enforce this */
9201                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9202                             "please upgrade BC\n", BNX2X_BC_VER, val);
9203         }
9204         bp->link_params.feature_config_flags |=
9205                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9206                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9207
9208         if (BP_E1HVN(bp) == 0) {
9209                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9210                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9211         } else {
9212                 /* no WOL capability for E1HVN != 0 */
9213                 bp->flags |= NO_WOL_FLAG;
9214         }
9215         BNX2X_DEV_INFO("%sWoL capable\n",
9216                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9217
9218         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9219         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9220         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9221         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9222
9223         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9224                  val, val2, val3, val4);
9225 }
9226
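/* Translate the NVRAM switch and external PHY configuration into an
 * ethtool SUPPORTED_* mask, then prune it according to the port's
 * speed_cap_mask.
 */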
9227 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9228                                                     u32 switch_cfg)
9229 {
9230         int port = BP_PORT(bp);
9231         u32 ext_phy_type;
9232
9233         switch (switch_cfg) {
9234         case SWITCH_CFG_1G:
9235                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9236
9237                 ext_phy_type =
9238                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9239                 switch (ext_phy_type) {
9240                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9241                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9242                                        ext_phy_type);
9243
9244                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9245                                                SUPPORTED_10baseT_Full |
9246                                                SUPPORTED_100baseT_Half |
9247                                                SUPPORTED_100baseT_Full |
9248                                                SUPPORTED_1000baseT_Full |
9249                                                SUPPORTED_2500baseX_Full |
9250                                                SUPPORTED_TP |
9251                                                SUPPORTED_FIBRE |
9252                                                SUPPORTED_Autoneg |
9253                                                SUPPORTED_Pause |
9254                                                SUPPORTED_Asym_Pause);
9255                         break;
9256
9257                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9258                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9259                                        ext_phy_type);
9260
9261                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9262                                                SUPPORTED_10baseT_Full |
9263                                                SUPPORTED_100baseT_Half |
9264                                                SUPPORTED_100baseT_Full |
9265                                                SUPPORTED_1000baseT_Full |
9266                                                SUPPORTED_TP |
9267                                                SUPPORTED_FIBRE |
9268                                                SUPPORTED_Autoneg |
9269                                                SUPPORTED_Pause |
9270                                                SUPPORTED_Asym_Pause);
9271                         break;
9272
9273                 default:
9274                         BNX2X_ERR("NVRAM config error. "
9275                                   "BAD SerDes ext_phy_config 0x%x\n",
9276                                   bp->link_params.ext_phy_config);
9277                         return;
9278                 }
9279
9280                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9281                                            port*0x10);
9282                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9283                 break;
9284
9285         case SWITCH_CFG_10G:
9286                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9287
9288                 ext_phy_type =
9289                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9290                 switch (ext_phy_type) {
9291                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9292                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9293                                        ext_phy_type);
9294
9295                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9296                                                SUPPORTED_10baseT_Full |
9297                                                SUPPORTED_100baseT_Half |
9298                                                SUPPORTED_100baseT_Full |
9299                                                SUPPORTED_1000baseT_Full |
9300                                                SUPPORTED_2500baseX_Full |
9301                                                SUPPORTED_10000baseT_Full |
9302                                                SUPPORTED_TP |
9303                                                SUPPORTED_FIBRE |
9304                                                SUPPORTED_Autoneg |
9305                                                SUPPORTED_Pause |
9306                                                SUPPORTED_Asym_Pause);
9307                         break;
9308
9309                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9310                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9311                                        ext_phy_type);
9312
9313                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9314                                                SUPPORTED_1000baseT_Full |
9315                                                SUPPORTED_FIBRE |
9316                                                SUPPORTED_Autoneg |
9317                                                SUPPORTED_Pause |
9318                                                SUPPORTED_Asym_Pause);
9319                         break;
9320
9321                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9322                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9323                                        ext_phy_type);
9324
9325                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9326                                                SUPPORTED_2500baseX_Full |
9327                                                SUPPORTED_1000baseT_Full |
9328                                                SUPPORTED_FIBRE |
9329                                                SUPPORTED_Autoneg |
9330                                                SUPPORTED_Pause |
9331                                                SUPPORTED_Asym_Pause);
9332                         break;
9333
9334                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9335                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9336                                        ext_phy_type);
9337
9338                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9339                                                SUPPORTED_FIBRE |
9340                                                SUPPORTED_Pause |
9341                                                SUPPORTED_Asym_Pause);
9342                         break;
9343
9344                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9345                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9346                                        ext_phy_type);
9347
9348                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9349                                                SUPPORTED_1000baseT_Full |
9350                                                SUPPORTED_FIBRE |
9351                                                SUPPORTED_Pause |
9352                                                SUPPORTED_Asym_Pause);
9353                         break;
9354
9355                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9356                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9357                                        ext_phy_type);
9358
9359                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9360                                                SUPPORTED_1000baseT_Full |
9361                                                SUPPORTED_Autoneg |
9362                                                SUPPORTED_FIBRE |
9363                                                SUPPORTED_Pause |
9364                                                SUPPORTED_Asym_Pause);
9365                         break;
9366
9367                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9368                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9369                                        ext_phy_type);
9370
9371                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9372                                                SUPPORTED_1000baseT_Full |
9373                                                SUPPORTED_Autoneg |
9374                                                SUPPORTED_FIBRE |
9375                                                SUPPORTED_Pause |
9376                                                SUPPORTED_Asym_Pause);
9377                         break;
9378
9379                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9380                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9381                                        ext_phy_type);
9382
9383                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9384                                                SUPPORTED_TP |
9385                                                SUPPORTED_Autoneg |
9386                                                SUPPORTED_Pause |
9387                                                SUPPORTED_Asym_Pause);
9388                         break;
9389
9390                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9391                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9392                                        ext_phy_type);
9393
9394                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9395                                                SUPPORTED_10baseT_Full |
9396                                                SUPPORTED_100baseT_Half |
9397                                                SUPPORTED_100baseT_Full |
9398                                                SUPPORTED_1000baseT_Full |
9399                                                SUPPORTED_10000baseT_Full |
9400                                                SUPPORTED_TP |
9401                                                SUPPORTED_Autoneg |
9402                                                SUPPORTED_Pause |
9403                                                SUPPORTED_Asym_Pause);
9404                         break;
9405
9406                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9407                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9408                                   bp->link_params.ext_phy_config);
9409                         break;
9410
9411                 default:
9412                         BNX2X_ERR("NVRAM config error. "
9413                                   "BAD XGXS ext_phy_config 0x%x\n",
9414                                   bp->link_params.ext_phy_config);
9415                         return;
9416                 }
9417
9418                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9419                                            port*0x18);
9420                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9421
9422                 break;
9423
9424         default:
9425                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9426                           bp->port.link_config);
9427                 return;
9428         }
9429         bp->link_params.phy_addr = bp->port.phy_addr;
9430
9431         /* mask what we support according to speed_cap_mask */
9432         if (!(bp->link_params.speed_cap_mask &
9433                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9434                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9435
9436         if (!(bp->link_params.speed_cap_mask &
9437                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9438                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9439
9440         if (!(bp->link_params.speed_cap_mask &
9441                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9442                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9443
9444         if (!(bp->link_params.speed_cap_mask &
9445                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9446                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9447
9448         if (!(bp->link_params.speed_cap_mask &
9449                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9450                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9451                                         SUPPORTED_1000baseT_Full);
9452
9453         if (!(bp->link_params.speed_cap_mask &
9454                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9455                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9456
9457         if (!(bp->link_params.speed_cap_mask &
9458                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9459                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9460
9461         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9462 }
9463
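/* Derive the requested line speed, duplex and flow control from the NVRAM
 * link_config, validating each choice against the supported mask computed
 * above; an unsupported choice is reported as an NVRAM config error.
 */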
9464 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9465 {
9466         bp->link_params.req_duplex = DUPLEX_FULL;
9467
9468         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9469         case PORT_FEATURE_LINK_SPEED_AUTO:
9470                 if (bp->port.supported & SUPPORTED_Autoneg) {
9471                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9472                         bp->port.advertising = bp->port.supported;
9473                 } else {
9474                         u32 ext_phy_type =
9475                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9476
9477                         if ((ext_phy_type ==
9478                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9479                             (ext_phy_type ==
9480                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9481                                 /* force 10G, no AN */
9482                                 bp->link_params.req_line_speed = SPEED_10000;
9483                                 bp->port.advertising =
9484                                                 (ADVERTISED_10000baseT_Full |
9485                                                  ADVERTISED_FIBRE);
9486                                 break;
9487                         }
9488                         BNX2X_ERR("NVRAM config error. "
9489                                   "Invalid link_config 0x%x"
9490                                   "  Autoneg not supported\n",
9491                                   bp->port.link_config);
9492                         return;
9493                 }
9494                 break;
9495
9496         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9497                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9498                         bp->link_params.req_line_speed = SPEED_10;
9499                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9500                                                 ADVERTISED_TP);
9501                 } else {
9502                         BNX2X_ERROR("NVRAM config error. "
9503                                     "Invalid link_config 0x%x"
9504                                     "  speed_cap_mask 0x%x\n",
9505                                     bp->port.link_config,
9506                                     bp->link_params.speed_cap_mask);
9507                         return;
9508                 }
9509                 break;
9510
9511         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9512                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9513                         bp->link_params.req_line_speed = SPEED_10;
9514                         bp->link_params.req_duplex = DUPLEX_HALF;
9515                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9516                                                 ADVERTISED_TP);
9517                 } else {
9518                         BNX2X_ERROR("NVRAM config error. "
9519                                     "Invalid link_config 0x%x"
9520                                     "  speed_cap_mask 0x%x\n",
9521                                     bp->port.link_config,
9522                                     bp->link_params.speed_cap_mask);
9523                         return;
9524                 }
9525                 break;
9526
9527         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9528                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9529                         bp->link_params.req_line_speed = SPEED_100;
9530                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9531                                                 ADVERTISED_TP);
9532                 } else {
9533                         BNX2X_ERROR("NVRAM config error. "
9534                                     "Invalid link_config 0x%x"
9535                                     "  speed_cap_mask 0x%x\n",
9536                                     bp->port.link_config,
9537                                     bp->link_params.speed_cap_mask);
9538                         return;
9539                 }
9540                 break;
9541
9542         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9543                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9544                         bp->link_params.req_line_speed = SPEED_100;
9545                         bp->link_params.req_duplex = DUPLEX_HALF;
9546                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9547                                                 ADVERTISED_TP);
9548                 } else {
9549                         BNX2X_ERROR("NVRAM config error. "
9550                                     "Invalid link_config 0x%x"
9551                                     "  speed_cap_mask 0x%x\n",
9552                                     bp->port.link_config,
9553                                     bp->link_params.speed_cap_mask);
9554                         return;
9555                 }
9556                 break;
9557
9558         case PORT_FEATURE_LINK_SPEED_1G:
9559                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9560                         bp->link_params.req_line_speed = SPEED_1000;
9561                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9562                                                 ADVERTISED_TP);
9563                 } else {
9564                         BNX2X_ERROR("NVRAM config error. "
9565                                     "Invalid link_config 0x%x"
9566                                     "  speed_cap_mask 0x%x\n",
9567                                     bp->port.link_config,
9568                                     bp->link_params.speed_cap_mask);
9569                         return;
9570                 }
9571                 break;
9572
9573         case PORT_FEATURE_LINK_SPEED_2_5G:
9574                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9575                         bp->link_params.req_line_speed = SPEED_2500;
9576                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9577                                                 ADVERTISED_TP);
9578                 } else {
9579                         BNX2X_ERROR("NVRAM config error. "
9580                                     "Invalid link_config 0x%x"
9581                                     "  speed_cap_mask 0x%x\n",
9582                                     bp->port.link_config,
9583                                     bp->link_params.speed_cap_mask);
9584                         return;
9585                 }
9586                 break;
9587
9588         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9589         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9590         case PORT_FEATURE_LINK_SPEED_10G_KR:
9591                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9592                         bp->link_params.req_line_speed = SPEED_10000;
9593                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9594                                                 ADVERTISED_FIBRE);
9595                 } else {
9596                         BNX2X_ERROR("NVRAM config error. "
9597                                     "Invalid link_config 0x%x"
9598                                     "  speed_cap_mask 0x%x\n",
9599                                     bp->port.link_config,
9600                                     bp->link_params.speed_cap_mask);
9601                         return;
9602                 }
9603                 break;
9604
9605         default:
9606                 BNX2X_ERROR("NVRAM config error. "
9607                             "BAD link speed link_config 0x%x\n",
9608                             bp->port.link_config);
9609                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9610                 bp->port.advertising = bp->port.supported;
9611                 break;
9612         }
9613
9614         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9615                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9616         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9617             !(bp->port.supported & SUPPORTED_Autoneg))
9618                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9619
9620         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9621                        "  advertising 0x%x\n",
9622                        bp->link_params.req_line_speed,
9623                        bp->link_params.req_duplex,
9624                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9625 }
9626
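/* Assemble a 6-byte MAC address from the two shared-memory words: the upper
 * 2 bytes come from mac_hi and the lower 4 from mac_lo, both stored
 * big-endian so the bytes land in network order.  For example, mac_hi
 * 0x0102 and mac_lo 0x03040506 yield 01:02:03:04:05:06.
 */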
9627 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9628 {
9629         mac_hi = cpu_to_be16(mac_hi);
9630         mac_lo = cpu_to_be32(mac_lo);
9631         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9632         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9633 }
9634
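/* Read the per-port configuration from shared memory: lane and external PHY
 * setup, speed capabilities, link and WoL defaults, per-lane XGXS RX/TX
 * settings, the MDIO address to use, and the port MAC address(es).
 */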
9635 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9636 {
9637         int port = BP_PORT(bp);
9638         u32 val, val2;
9639         u32 config;
9640         u16 i;
9641         u32 ext_phy_type;
9642
9643         bp->link_params.bp = bp;
9644         bp->link_params.port = port;
9645
9646         bp->link_params.lane_config =
9647                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9648         bp->link_params.ext_phy_config =
9649                 SHMEM_RD(bp,
9650                          dev_info.port_hw_config[port].external_phy_config);
9651         /* BCM8727_NOC => BCM8727 without over-current support */
9652         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9653             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9654                 bp->link_params.ext_phy_config &=
9655                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9656                 bp->link_params.ext_phy_config |=
9657                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9658                 bp->link_params.feature_config_flags |=
9659                         FEATURE_CONFIG_BCM8727_NOC;
9660         }
9661
9662         bp->link_params.speed_cap_mask =
9663                 SHMEM_RD(bp,
9664                          dev_info.port_hw_config[port].speed_capability_mask);
9665
9666         bp->port.link_config =
9667                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9668
9669         /* Get the RX and TX XGXS configuration for all 4 lanes */
9670         for (i = 0; i < 2; i++) {
9671                 val = SHMEM_RD(bp,
9672                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9673                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9674                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9675
9676                 val = SHMEM_RD(bp,
9677                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9678                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9679                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9680         }
9681
9682         /* If the device is capable of WoL, set the default state according
9683          * to the HW
9684          */
9685         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9686         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9687                    (config & PORT_FEATURE_WOL_ENABLED));
9688
9689         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9690                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9691                        bp->link_params.lane_config,
9692                        bp->link_params.ext_phy_config,
9693                        bp->link_params.speed_cap_mask, bp->port.link_config);
9694
9695         bp->link_params.switch_cfg |= (bp->port.link_config &
9696                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9697         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9698
9699         bnx2x_link_settings_requested(bp);
9700
9701         /*
9702          * If connected directly, work with the internal PHY; otherwise, work
9703          * with the external PHY
9704          */
9705         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9706         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9707                 bp->mdio.prtad = bp->link_params.phy_addr;
9708
9709         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9710                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9711                 bp->mdio.prtad =
9712                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9713
9714         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9715         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9716         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9717         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9718         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9719
9720 #ifdef BCM_CNIC
9721         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9722         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9723         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9724 #endif
9725 }
9726
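/* Gather the function-level configuration: detect E1H multi-function mode
 * via the outer-VLAN (E1HOV) tag, read the port info and FW sequence number
 * when an MCP is present, and apply the per-function MAC override (falling
 * back to a random MAC only on emulation/FPGA without an MCP).
 */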
9727 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9728 {
9729         int func = BP_FUNC(bp);
9730         u32 val, val2;
9731         int rc = 0;
9732
9733         bnx2x_get_common_hwinfo(bp);
9734
9735         bp->e1hov = 0;
9736         bp->e1hmf = 0;
9737         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9738                 bp->mf_config =
9739                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9740
9741                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9742                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9743                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9744                         bp->e1hmf = 1;
9745                 BNX2X_DEV_INFO("%s function mode\n",
9746                                IS_E1HMF(bp) ? "multi" : "single");
9747
9748                 if (IS_E1HMF(bp)) {
9749                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9750                                                                 e1hov_tag) &
9751                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9752                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9753                                 bp->e1hov = val;
9754                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9755                                                "(0x%04x)\n",
9756                                                func, bp->e1hov, bp->e1hov);
9757                         } else {
9758                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9759                                             "  aborting\n", func);
9760                                 rc = -EPERM;
9761                         }
9762                 } else {
9763                         if (BP_E1HVN(bp)) {
9764                                 BNX2X_ERROR("VN %d in single function mode,"
9765                                             "  aborting\n", BP_E1HVN(bp));
9766                                 rc = -EPERM;
9767                         }
9768                 }
9769         }
9770
9771         if (!BP_NOMCP(bp)) {
9772                 bnx2x_get_port_hwinfo(bp);
9773
9774                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9775                               DRV_MSG_SEQ_NUMBER_MASK);
9776                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9777         }
9778
9779         if (IS_E1HMF(bp)) {
9780                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9781                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9782                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9783                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9784                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9785                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9786                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9787                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9788                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9789                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9790                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9791                                ETH_ALEN);
9792                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9793                                ETH_ALEN);
9794                 }
9795
9796                 return rc;
9797         }
9798
9799         if (BP_NOMCP(bp)) {
9800                 /* only supposed to happen on emulation/FPGA */
9801                 BNX2X_ERROR("warning: random MAC workaround active\n");
9802                 random_ether_addr(bp->dev->dev_addr);
9803                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9804         }
9805
9806         return rc;
9807 }
9808
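/* Walk the PCI VPD read-only section; if the manufacturer-ID keyword
 * matches the Dell vendor ID, copy the V0 vendor-specific string into
 * bp->fw_ver for display.  Any parse failure leaves fw_ver zeroed.
 */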
9809 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9810 {
9811         int cnt, i, block_end, rodi;
9812         char vpd_data[BNX2X_VPD_LEN+1];
9813         char str_id_reg[VENDOR_ID_LEN+1];
9814         char str_id_cap[VENDOR_ID_LEN+1];
9815         u8 len;
9816
9817         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9818         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9819
9820         if (cnt < BNX2X_VPD_LEN)
9821                 goto out_not_found;
9822
9823         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9824                              PCI_VPD_LRDT_RO_DATA);
9825         if (i < 0)
9826                 goto out_not_found;
9827
9829         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9830                     pci_vpd_lrdt_size(&vpd_data[i]);
9831
9832         i += PCI_VPD_LRDT_TAG_SIZE;
9833
9834         if (block_end > BNX2X_VPD_LEN)
9835                 goto out_not_found;
9836
9837         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9838                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9839         if (rodi < 0)
9840                 goto out_not_found;
9841
9842         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9843
9844         if (len != VENDOR_ID_LEN)
9845                 goto out_not_found;
9846
9847         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9848
9849         /* vendor-specific info: check whether the board is Dell-branded */
9850         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9851         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9852         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9853             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9854
9855                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9856                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9857                 if (rodi >= 0) {
9858                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9859
9860                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9861
9862                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9863                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9864                                 bp->fw_ver[len] = ' ';
9865                         }
9866                 }
9867                 return;
9868         }
9869 out_not_found:
9870         return;
9871 }
9872
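/* One-time driver-state init: locks, work items, HW/FW info, module
 * parameter sanity checks and default ring/coalescing/timer settings.
 */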
9873 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9874 {
9875         int func = BP_FUNC(bp);
9876         int timer_interval;
9877         int rc;
9878
9879         /* Disable interrupt handling until HW is initialized */
9880         atomic_set(&bp->intr_sem, 1);
9881         smp_wmb(); /* make the intr_sem update visible to other CPUs */
9882
9883         mutex_init(&bp->port.phy_mutex);
9884         mutex_init(&bp->fw_mb_mutex);
9885 #ifdef BCM_CNIC
9886         mutex_init(&bp->cnic_mutex);
9887 #endif
9888
9889         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9890         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9891
9892         rc = bnx2x_get_hwinfo(bp);
9893
9894         bnx2x_read_fwinfo(bp);
9895         /* the chip needs to be reset if the UNDI driver was active */
9896         if (!BP_NOMCP(bp))
9897                 bnx2x_undi_unload(bp);
9898
9899         if (CHIP_REV_IS_FPGA(bp))
9900                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9901
9902         if (BP_NOMCP(bp) && (func == 0))
9903                 dev_err(&bp->pdev->dev, "MCP disabled, "
9904                                         "must load devices in order!\n");
9905
9906         /* Set multi queue mode */
9907         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9908             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9909                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9910                                         "requested is not MSI-X\n");
9911                 multi_mode = ETH_RSS_MODE_DISABLED;
9912         }
9913         bp->multi_mode = multi_mode;
9914
9916         bp->dev->features |= NETIF_F_GRO;
9917
9918         /* Set TPA flags */
9919         if (disable_tpa) {
9920                 bp->flags &= ~TPA_ENABLE_FLAG;
9921                 bp->dev->features &= ~NETIF_F_LRO;
9922         } else {
9923                 bp->flags |= TPA_ENABLE_FLAG;
9924                 bp->dev->features |= NETIF_F_LRO;
9925         }
9926
9927         if (CHIP_IS_E1(bp))
9928                 bp->dropless_fc = 0;
9929         else
9930                 bp->dropless_fc = dropless_fc;
9931
9932         bp->mrrs = mrrs;
9933
9934         bp->tx_ring_size = MAX_TX_AVAIL;
9935         bp->rx_ring_size = MAX_RX_AVAIL;
9936
9937         bp->rx_csum = 1;
9938
9939         /* round the coalescing ticks down to the HW granularity (4*BNX2X_BTR) */
9940         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9941         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9942
9943         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9944         bp->current_interval = (poll ? poll : timer_interval);
9945
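        /* set up the periodic driver timer; init_timer() only initializes
         * it, the timer itself is armed elsewhere */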
9946         init_timer(&bp->timer);
9947         bp->timer.expires = jiffies + bp->current_interval;
9948         bp->timer.data = (unsigned long) bp;
9949         bp->timer.function = bnx2x_timer;
9950
9951         return rc;
9952 }
9953
9954 /*
9955  * ethtool service functions
9956  */
9957
9958 /* All ethtool functions are called with the rtnl lock held */
9959
9960 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9961 {
9962         struct bnx2x *bp = netdev_priv(dev);
9963
9964         cmd->supported = bp->port.supported;
9965         cmd->advertising = bp->port.advertising;
9966
9967         if ((bp->state == BNX2X_STATE_OPEN) &&
9968             !(bp->flags & MF_FUNC_DIS) &&
9969             (bp->link_vars.link_up)) {
9970                 cmd->speed = bp->link_vars.line_speed;
9971                 cmd->duplex = bp->link_vars.duplex;
9972                 if (IS_E1HMF(bp)) {
9973                         u16 vn_max_rate;
9974
9975                         vn_max_rate =
9976                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9977                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9978                         if (vn_max_rate < cmd->speed)
9979                                 cmd->speed = vn_max_rate;
9980                 }
9981         } else {
9982                 cmd->speed = -1;
9983                 cmd->duplex = -1;
9984         }
9985
9986         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9987                 u32 ext_phy_type =
9988                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9989
9990                 switch (ext_phy_type) {
9991                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9992                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9993                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9994                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9995                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9996                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9997                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9998                         cmd->port = PORT_FIBRE;
9999                         break;
10000
10001                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10002                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10003                         cmd->port = PORT_TP;
10004                         break;
10005
10006                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10007                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10008                                   bp->link_params.ext_phy_config);
10009                         break;
10010
10011                 default:
10012                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10013                            bp->link_params.ext_phy_config);
10014                         break;
10015                 }
10016         } else
10017                 cmd->port = PORT_TP;
10018
10019         cmd->phy_address = bp->mdio.prtad;
10020         cmd->transceiver = XCVR_INTERNAL;
10021
10022         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10023                 cmd->autoneg = AUTONEG_ENABLE;
10024         else
10025                 cmd->autoneg = AUTONEG_DISABLE;
10026
10027         cmd->maxtxpkt = 0;
10028         cmd->maxrxpkt = 0;
10029
10030         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10031            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10032            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10033            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10034            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10035            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10036            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10037
10038         return 0;
10039 }
10040
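/* Validate a requested autoneg or forced speed/duplex setting against the
 * supported mask and push it to the link code.  In E1H multi-function mode
 * the request is accepted but ignored, since the physical link is shared
 * by all functions on the port.
 */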
10041 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10042 {
10043         struct bnx2x *bp = netdev_priv(dev);
10044         u32 advertising;
10045
10046         if (IS_E1HMF(bp))
10047                 return 0;
10048
10049         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10050            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10051            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10052            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10053            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10054            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10055            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10056
10057         if (cmd->autoneg == AUTONEG_ENABLE) {
10058                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10059                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10060                         return -EINVAL;
10061                 }
10062
10063                 /* advertise the requested speed and duplex if supported */
10064                 cmd->advertising &= bp->port.supported;
10065
10066                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10067                 bp->link_params.req_duplex = DUPLEX_FULL;
10068                 bp->port.advertising |= (ADVERTISED_Autoneg |
10069                                          cmd->advertising);
10070
10071         } else { /* forced speed */
10072                 /* advertise the requested speed and duplex if supported */
10073                 switch (cmd->speed) {
10074                 case SPEED_10:
10075                         if (cmd->duplex == DUPLEX_FULL) {
10076                                 if (!(bp->port.supported &
10077                                       SUPPORTED_10baseT_Full)) {
10078                                         DP(NETIF_MSG_LINK,
10079                                            "10M full not supported\n");
10080                                         return -EINVAL;
10081                                 }
10082
10083                                 advertising = (ADVERTISED_10baseT_Full |
10084                                                ADVERTISED_TP);
10085                         } else {
10086                                 if (!(bp->port.supported &
10087                                       SUPPORTED_10baseT_Half)) {
10088                                         DP(NETIF_MSG_LINK,
10089                                            "10M half not supported\n");
10090                                         return -EINVAL;
10091                                 }
10092
10093                                 advertising = (ADVERTISED_10baseT_Half |
10094                                                ADVERTISED_TP);
10095                         }
10096                         break;
10097
10098                 case SPEED_100:
10099                         if (cmd->duplex == DUPLEX_FULL) {
10100                                 if (!(bp->port.supported &
10101                                                 SUPPORTED_100baseT_Full)) {
10102                                         DP(NETIF_MSG_LINK,
10103                                            "100M full not supported\n");
10104                                         return -EINVAL;
10105                                 }
10106
10107                                 advertising = (ADVERTISED_100baseT_Full |
10108                                                ADVERTISED_TP);
10109                         } else {
10110                                 if (!(bp->port.supported &
10111                                                 SUPPORTED_100baseT_Half)) {
10112                                         DP(NETIF_MSG_LINK,
10113                                            "100M half not supported\n");
10114                                         return -EINVAL;
10115                                 }
10116
10117                                 advertising = (ADVERTISED_100baseT_Half |
10118                                                ADVERTISED_TP);
10119                         }
10120                         break;
10121
10122                 case SPEED_1000:
10123                         if (cmd->duplex != DUPLEX_FULL) {
10124                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10125                                 return -EINVAL;
10126                         }
10127
10128                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10129                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10130                                 return -EINVAL;
10131                         }
10132
10133                         advertising = (ADVERTISED_1000baseT_Full |
10134                                        ADVERTISED_TP);
10135                         break;
10136
10137                 case SPEED_2500:
10138                         if (cmd->duplex != DUPLEX_FULL) {
10139                                 DP(NETIF_MSG_LINK,
10140                                    "2.5G half not supported\n");
10141                                 return -EINVAL;
10142                         }
10143
10144                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10145                                 DP(NETIF_MSG_LINK,
10146                                    "2.5G full not supported\n");
10147                                 return -EINVAL;
10148                         }
10149
10150                         advertising = (ADVERTISED_2500baseX_Full |
10151                                        ADVERTISED_TP);
10152                         break;
10153
10154                 case SPEED_10000:
10155                         if (cmd->duplex != DUPLEX_FULL) {
10156                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10157                                 return -EINVAL;
10158                         }
10159
10160                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10161                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10162                                 return -EINVAL;
10163                         }
10164
10165                         advertising = (ADVERTISED_10000baseT_Full |
10166                                        ADVERTISED_FIBRE);
10167                         break;
10168
10169                 default:
10170                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10171                         return -EINVAL;
10172                 }
10173
10174                 bp->link_params.req_line_speed = cmd->speed;
10175                 bp->link_params.req_duplex = cmd->duplex;
10176                 bp->port.advertising = advertising;
10177         }
10178
10179         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10180            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10181            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10182            bp->port.advertising);
10183
10184         if (netif_running(dev)) {
10185                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10186                 bnx2x_link_set(bp);
10187         }
10188
10189         return 0;
10190 }
10191
10192 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10193 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10194
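/* Compute the register-dump size: every plain register block, and every
 * wreg block with its extra read registers, that is online for this chip
 * revision, converted from dwords to bytes, plus the dump header.
 */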
10195 static int bnx2x_get_regs_len(struct net_device *dev)
10196 {
10197         struct bnx2x *bp = netdev_priv(dev);
10198         int regdump_len = 0;
10199         int i;
10200
10201         if (CHIP_IS_E1(bp)) {
10202                 for (i = 0; i < REGS_COUNT; i++)
10203                         if (IS_E1_ONLINE(reg_addrs[i].info))
10204                                 regdump_len += reg_addrs[i].size;
10205
10206                 for (i = 0; i < WREGS_COUNT_E1; i++)
10207                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10208                                 regdump_len += wreg_addrs_e1[i].size *
10209                                         (1 + wreg_addrs_e1[i].read_regs_count);
10210
10211         } else { /* E1H */
10212                 for (i = 0; i < REGS_COUNT; i++)
10213                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10214                                 regdump_len += reg_addrs[i].size;
10215
10216                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10217                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10218                                 regdump_len += wreg_addrs_e1h[i].size *
10219                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10220         }
10221         regdump_len *= 4;
10222         regdump_len += sizeof(struct dump_hdr);
10223
10224         return regdump_len;
10225 }
10226
10227 static void bnx2x_get_regs(struct net_device *dev,
10228                            struct ethtool_regs *regs, void *_p)
10229 {
10230         u32 *p = _p, i, j;
10231         struct bnx2x *bp = netdev_priv(dev);
10232         struct dump_hdr dump_hdr = {0};
10233
10234         regs->version = 0;
10235         memset(p, 0, regs->len);
10236
10237         if (!netif_running(bp->dev))
10238                 return;
10239
10240         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10241         dump_hdr.dump_sign = dump_sign_all;
10242         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10243         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10244         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10245         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10246         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10247
10248         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10249         p += dump_hdr.hdr_size + 1;
10250
10251         if (CHIP_IS_E1(bp)) {
10252                 for (i = 0; i < REGS_COUNT; i++)
10253                         if (IS_E1_ONLINE(reg_addrs[i].info))
10254                                 for (j = 0; j < reg_addrs[i].size; j++)
10255                                         *p++ = REG_RD(bp,
10256                                                       reg_addrs[i].addr + j*4);
10257
10258         } else { /* E1H */
10259                 for (i = 0; i < REGS_COUNT; i++)
10260                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10261                                 for (j = 0; j < reg_addrs[i].size; j++)
10262                                         *p++ = REG_RD(bp,
10263                                                       reg_addrs[i].addr + j*4);
10264         }
10265 }
10266
10267 #define PHY_FW_VER_LEN                  10
10268
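/* Fill in the driver name/version, the VPD-derived FW string plus bootcode
 * (and, for the PMF, external PHY FW) versions, and the dump/EEPROM sizes.
 */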
10269 static void bnx2x_get_drvinfo(struct net_device *dev,
10270                               struct ethtool_drvinfo *info)
10271 {
10272         struct bnx2x *bp = netdev_priv(dev);
10273         u8 phy_fw_ver[PHY_FW_VER_LEN];
10274
10275         strcpy(info->driver, DRV_MODULE_NAME);
10276         strcpy(info->version, DRV_MODULE_VERSION);
10277
10278         phy_fw_ver[0] = '\0';
10279         if (bp->port.pmf) {
10280                 bnx2x_acquire_phy_lock(bp);
10281                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10282                                              (bp->state != BNX2X_STATE_CLOSED),
10283                                              phy_fw_ver, PHY_FW_VER_LEN);
10284                 bnx2x_release_phy_lock(bp);
10285         }
10286
10287         strncpy(info->fw_version, bp->fw_ver, 32);
10288         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10289                  "bc %d.%d.%d%s%s",
10290                  (bp->common.bc_ver & 0xff0000) >> 16,
10291                  (bp->common.bc_ver & 0xff00) >> 8,
10292                  (bp->common.bc_ver & 0xff),
10293                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10294         strcpy(info->bus_info, pci_name(bp->pdev));
10295         info->n_stats = BNX2X_NUM_STATS;
10296         info->testinfo_len = BNX2X_NUM_TESTS;
10297         info->eedump_len = bp->common.flash_size;
10298         info->regdump_len = bnx2x_get_regs_len(dev);
10299 }
10300
10301 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10302 {
10303         struct bnx2x *bp = netdev_priv(dev);
10304
10305         if (bp->flags & NO_WOL_FLAG) {
10306                 wol->supported = 0;
10307                 wol->wolopts = 0;
10308         } else {
10309                 wol->supported = WAKE_MAGIC;
10310                 if (bp->wol)
10311                         wol->wolopts = WAKE_MAGIC;
10312                 else
10313                         wol->wolopts = 0;
10314         }
10315         memset(&wol->sopass, 0, sizeof(wol->sopass));
10316 }
10317
10318 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10319 {
10320         struct bnx2x *bp = netdev_priv(dev);
10321
10322         if (wol->wolopts & ~WAKE_MAGIC)
10323                 return -EINVAL;
10324
10325         if (wol->wolopts & WAKE_MAGIC) {
10326                 if (bp->flags & NO_WOL_FLAG)
10327                         return -EINVAL;
10328
10329                 bp->wol = 1;
10330         } else
10331                 bp->wol = 0;
10332
10333         return 0;
10334 }
10335
10336 static u32 bnx2x_get_msglevel(struct net_device *dev)
10337 {
10338         struct bnx2x *bp = netdev_priv(dev);
10339
10340         return bp->msg_enable;
10341 }
10342
10343 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10344 {
10345         struct bnx2x *bp = netdev_priv(dev);
10346
10347         if (capable(CAP_NET_ADMIN))
10348                 bp->msg_enable = level;
10349 }
10350
10351 static int bnx2x_nway_reset(struct net_device *dev)
10352 {
10353         struct bnx2x *bp = netdev_priv(dev);
10354
10355         if (!bp->port.pmf)
10356                 return 0;
10357
10358         if (netif_running(dev)) {
10359                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10360                 bnx2x_link_set(bp);
10361         }
10362
10363         return 0;
10364 }
10365
10366 static u32 bnx2x_get_link(struct net_device *dev)
10367 {
10368         struct bnx2x *bp = netdev_priv(dev);
10369
10370         if (bp->flags & MF_FUNC_DIS)
10371                 return 0;
10372
10373         return bp->link_vars.link_up;
10374 }
10375
10376 static int bnx2x_get_eeprom_len(struct net_device *dev)
10377 {
10378         struct bnx2x *bp = netdev_priv(dev);
10379
10380         return bp->common.flash_size;
10381 }
10382
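/* Request the per-port software-arbitration bit in the MCP NVM arbiter and
 * poll until it is granted; paired with bnx2x_release_nvram_lock().
 */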
10383 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10384 {
10385         int port = BP_PORT(bp);
10386         int count, i;
10387         u32 val = 0;
10388
10389         /* adjust timeout for emulation/FPGA */
10390         count = NVRAM_TIMEOUT_COUNT;
10391         if (CHIP_REV_IS_SLOW(bp))
10392                 count *= 100;
10393
10394         /* request access to nvram interface */
10395         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10396                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10397
10398         for (i = 0; i < count*10; i++) {
10399                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10400                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10401                         break;
10402
10403                 udelay(5);
10404         }
10405
10406         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10407                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10408                 return -EBUSY;
10409         }
10410
10411         return 0;
10412 }
10413
10414 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10415 {
10416         int port = BP_PORT(bp);
10417         int count, i;
10418         u32 val = 0;
10419
10420         /* adjust timeout for emulation/FPGA */
10421         count = NVRAM_TIMEOUT_COUNT;
10422         if (CHIP_REV_IS_SLOW(bp))
10423                 count *= 100;
10424
10425         /* relinquish nvram interface */
10426         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10427                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10428
10429         for (i = 0; i < count*10; i++) {
10430                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10431                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10432                         break;
10433
10434                 udelay(5);
10435         }
10436
10437         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10438                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10439                 return -EBUSY;
10440         }
10441
10442         return 0;
10443 }
10444
10445 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10446 {
10447         u32 val;
10448
10449         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10450
10451         /* enable both bits, even on read */
10452         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10453                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10454                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10455 }
10456
10457 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10458 {
10459         u32 val;
10460
10461         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10462
10463         /* disable both bits, even after read */
10464         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10465                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10466                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10467 }
10468
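/* Issue a single 32-bit NVRAM read through the MCP command interface and
 * poll for DONE; the result is returned big-endian, ready for ethtool.
 */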
10469 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10470                                   u32 cmd_flags)
10471 {
10472         int count, i, rc;
10473         u32 val;
10474
10475         /* build the command word */
10476         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10477
10478         /* need to clear DONE bit separately */
10479         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10480
10481         /* address of the NVRAM to read from */
10482         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10483                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10484
10485         /* issue a read command */
10486         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10487
10488         /* adjust timeout for emulation/FPGA */
10489         count = NVRAM_TIMEOUT_COUNT;
10490         if (CHIP_REV_IS_SLOW(bp))
10491                 count *= 100;
10492
10493         /* wait for completion */
10494         *ret_val = 0;
10495         rc = -EBUSY;
10496         for (i = 0; i < count; i++) {
10497                 udelay(5);
10498                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10499
10500                 if (val & MCPR_NVM_COMMAND_DONE) {
10501                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10502                         /* we read nvram data in cpu order,
10503                          * but ethtool sees it as an array of bytes;
10504                          * converting to big-endian does the job */
10505                         *ret_val = cpu_to_be32(val);
10506                         rc = 0;
10507                         break;
10508                 }
10509         }
10510
10511         return rc;
10512 }
10513
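/* Stream a dword-aligned range out of NVRAM, flagging the first and last
 * dwords so the MCP knows where the burst starts and ends.
 */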
10514 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10515                             int buf_size)
10516 {
10517         int rc;
10518         u32 cmd_flags;
10519         __be32 val;
10520
10521         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10522                 DP(BNX2X_MSG_NVM,
10523                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10524                    offset, buf_size);
10525                 return -EINVAL;
10526         }
10527
10528         if (offset + buf_size > bp->common.flash_size) {
10529                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10530                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10531                    offset, buf_size, bp->common.flash_size);
10532                 return -EINVAL;
10533         }
10534
10535         /* request access to nvram interface */
10536         rc = bnx2x_acquire_nvram_lock(bp);
10537         if (rc)
10538                 return rc;
10539
10540         /* enable access to nvram interface */
10541         bnx2x_enable_nvram_access(bp);
10542
10543         /* read the first word(s) */
10544         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10545         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10546                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10547                 memcpy(ret_buf, &val, 4);
10548
10549                 /* advance to the next dword */
10550                 offset += sizeof(u32);
10551                 ret_buf += sizeof(u32);
10552                 buf_size -= sizeof(u32);
10553                 cmd_flags = 0;
10554         }
10555
10556         if (rc == 0) {
10557                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10558                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10559                 memcpy(ret_buf, &val, 4);
10560         }
10561
10562         /* disable access to nvram interface */
10563         bnx2x_disable_nvram_access(bp);
10564         bnx2x_release_nvram_lock(bp);
10565
10566         return rc;
10567 }
10568
10569 static int bnx2x_get_eeprom(struct net_device *dev,
10570                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10571 {
10572         struct bnx2x *bp = netdev_priv(dev);
10573         int rc;
10574
10575         if (!netif_running(dev))
10576                 return -EAGAIN;
10577
10578         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10579            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10580            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10581            eeprom->len, eeprom->len);
10582
10583         /* parameters already validated in ethtool_get_eeprom */
10584
10585         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10586
10587         return rc;
10588 }
10589
10590 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10591                                    u32 cmd_flags)
10592 {
10593         int count, i, rc;
10594
10595         /* build the command word */
10596         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10597
10598         /* need to clear DONE bit separately */
10599         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10600
10601         /* write the data */
10602         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10603
10604         /* address of the NVRAM to write to */
10605         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10606                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10607
10608         /* issue the write command */
10609         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10610
10611         /* adjust timeout for emulation/FPGA */
10612         count = NVRAM_TIMEOUT_COUNT;
10613         if (CHIP_REV_IS_SLOW(bp))
10614                 count *= 100;
10615
10616         /* wait for completion */
10617         rc = -EBUSY;
10618         for (i = 0; i < count; i++) {
10619                 udelay(5);
10620                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10621                 if (val & MCPR_NVM_COMMAND_DONE) {
10622                         rc = 0;
10623                         break;
10624                 }
10625         }
10626
10627         return rc;
10628 }
10629
10630 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
10631
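/* Write a single byte by read-modify-writing the dword that contains it;
 * BYTE_OFFSET() turns the byte offset within the dword into a bit shift.
 */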
10632 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10633                               int buf_size)
10634 {
10635         int rc;
10636         u32 cmd_flags;
10637         u32 align_offset;
10638         __be32 val;
10639
10640         if (offset + buf_size > bp->common.flash_size) {
10641                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10642                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10643                    offset, buf_size, bp->common.flash_size);
10644                 return -EINVAL;
10645         }
10646
10647         /* request access to nvram interface */
10648         rc = bnx2x_acquire_nvram_lock(bp);
10649         if (rc)
10650                 return rc;
10651
10652         /* enable access to nvram interface */
10653         bnx2x_enable_nvram_access(bp);
10654
10655         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10656         align_offset = (offset & ~0x03);
10657         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10658
10659         if (rc == 0) {
10660                 val &= ~(0xff << BYTE_OFFSET(offset));
10661                 val |= (*data_buf << BYTE_OFFSET(offset));
10662
10663                 /* nvram data is returned as an array of bytes;
10664                  * convert it back to cpu order */
10665                 val = be32_to_cpu(val);
10666
10667                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10668                                              cmd_flags);
10669         }
10670
10671         /* disable access to nvram interface */
10672         bnx2x_disable_nvram_access(bp);
10673         bnx2x_release_nvram_lock(bp);
10674
10675         return rc;
10676 }
10677
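/* Write a dword-aligned buffer to NVRAM, restarting the FIRST/LAST command
 * sequence at NVRAM page boundaries so each page is programmed separately.
 */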
10678 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10679                              int buf_size)
10680 {
10681         int rc;
10682         u32 cmd_flags;
10683         u32 val;
10684         u32 written_so_far;
10685
10686         if (buf_size == 1)      /* single-byte write, as issued by ethtool */
10687                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10688
10689         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10690                 DP(BNX2X_MSG_NVM,
10691                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10692                    offset, buf_size);
10693                 return -EINVAL;
10694         }
10695
10696         if (offset + buf_size > bp->common.flash_size) {
10697                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10698                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10699                    offset, buf_size, bp->common.flash_size);
10700                 return -EINVAL;
10701         }
10702
10703         /* request access to nvram interface */
10704         rc = bnx2x_acquire_nvram_lock(bp);
10705         if (rc)
10706                 return rc;
10707
10708         /* enable access to nvram interface */
10709         bnx2x_enable_nvram_access(bp);
10710
10711         written_so_far = 0;
10712         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10713         while ((written_so_far < buf_size) && (rc == 0)) {
10714                 if (written_so_far == (buf_size - sizeof(u32)))
10715                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10716                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10717                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10718                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10719                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10720
10721                 memcpy(&val, data_buf, 4);
10722
10723                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10724
10725                 /* advance to the next dword */
10726                 offset += sizeof(u32);
10727                 data_buf += sizeof(u32);
10728                 written_so_far += sizeof(u32);
10729                 cmd_flags = 0;
10730         }
10731
10732         /* disable access to nvram interface */
10733         bnx2x_disable_nvram_access(bp);
10734         bnx2x_release_nvram_lock(bp);
10735
10736         return rc;
10737 }
10738
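/* Besides plain NVRAM writes, this entry point implements a PMF-only PHY
 * firmware upgrade handshake keyed on magic values: 'PHYP' prepares the
 * PHY, 'PHYR' re-initializes the link after the upgrade, and a third magic
 * ('PHYC' in the code) completes the upgrade on the SFX7101.
 */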
10739 static int bnx2x_set_eeprom(struct net_device *dev,
10740                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10741 {
10742         struct bnx2x *bp = netdev_priv(dev);
10743         int port = BP_PORT(bp);
10744         int rc = 0;
10745
10746         if (!netif_running(dev))
10747                 return -EAGAIN;
10748
10749         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10750            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10751            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10752            eeprom->len, eeprom->len);
10753
10754         /* parameters already validated in ethtool_set_eeprom */
10755
10756         /* the PHY EEPROM can be accessed only by the PMF */
10757         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10758             !bp->port.pmf)
10759                 return -EINVAL;
10760
10761         if (eeprom->magic == 0x50485950) {
10762                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10763                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10764
10765                 bnx2x_acquire_phy_lock(bp);
10766                 rc |= bnx2x_link_reset(&bp->link_params,
10767                                        &bp->link_vars, 0);
10768                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10769                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10770                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10771                                        MISC_REGISTERS_GPIO_HIGH, port);
10772                 bnx2x_release_phy_lock(bp);
10773                 bnx2x_link_report(bp);
10774
10775         } else if (eeprom->magic == 0x50485952) {
10776                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10777                 if (bp->state == BNX2X_STATE_OPEN) {
10778                         bnx2x_acquire_phy_lock(bp);
10779                         rc |= bnx2x_link_reset(&bp->link_params,
10780                                                &bp->link_vars, 1);
10781
10782                         rc |= bnx2x_phy_init(&bp->link_params,
10783                                              &bp->link_vars);
10784                         bnx2x_release_phy_lock(bp);
10785                         bnx2x_calc_fc_adv(bp);
10786                 }
10787         } else if (eeprom->magic == 0x53985943) {
10788                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10789                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10790                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10791                         u8 ext_phy_addr =
10792                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10793
10794                         /* DSP Remove Download Mode */
10795                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10796                                        MISC_REGISTERS_GPIO_LOW, port);
10797
10798                         bnx2x_acquire_phy_lock(bp);
10799
10800                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10801
10802                         /* wait 0.5 sec to allow it to run */
10803                         msleep(500);
10804                         bnx2x_ext_phy_hw_reset(bp, port);
10805                         msleep(500);
10806                         bnx2x_release_phy_lock(bp);
10807                 }
10808         } else
10809                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10810
10811         return rc;
10812 }
10813
10814 static int bnx2x_get_coalesce(struct net_device *dev,
10815                               struct ethtool_coalesce *coal)
10816 {
10817         struct bnx2x *bp = netdev_priv(dev);
10818
10819         memset(coal, 0, sizeof(struct ethtool_coalesce));
10820
10821         coal->rx_coalesce_usecs = bp->rx_ticks;
10822         coal->tx_coalesce_usecs = bp->tx_ticks;
10823
10824         return 0;
10825 }
10826
10827 static int bnx2x_set_coalesce(struct net_device *dev,
10828                               struct ethtool_coalesce *coal)
10829 {
10830         struct bnx2x *bp = netdev_priv(dev);
10831
10832         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10833         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10834                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10835
10836         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10837         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10838                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10839
10840         if (netif_running(dev))
10841                 bnx2x_update_coalesce(bp);
10842
10843         return 0;
10844 }
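
/* e.g. "ethtool -C ethX rx-usecs 25 tx-usecs 50" is handled by
 * bnx2x_set_coalesce() above: each value is clamped to
 * BNX2X_MAX_COALESCE_TOUT and the HC timers are re-programmed via
 * bnx2x_update_coalesce() while the interface is up.
 */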
10845
10846 static void bnx2x_get_ringparam(struct net_device *dev,
10847                                 struct ethtool_ringparam *ering)
10848 {
10849         struct bnx2x *bp = netdev_priv(dev);
10850
10851         ering->rx_max_pending = MAX_RX_AVAIL;
10852         ering->rx_mini_max_pending = 0;
10853         ering->rx_jumbo_max_pending = 0;
10854
10855         ering->rx_pending = bp->rx_ring_size;
10856         ering->rx_mini_pending = 0;
10857         ering->rx_jumbo_pending = 0;
10858
10859         ering->tx_max_pending = MAX_TX_AVAIL;
10860         ering->tx_pending = bp->tx_ring_size;
10861 }
10862
10863 static int bnx2x_set_ringparam(struct net_device *dev,
10864                                struct ethtool_ringparam *ering)
10865 {
10866         struct bnx2x *bp = netdev_priv(dev);
10867         int rc = 0;
10868
10869         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10870                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10871                 return -EAGAIN;
10872         }
10873
10874         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10875             (ering->tx_pending > MAX_TX_AVAIL) ||
10876             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10877                 return -EINVAL;
10878
10879         bp->rx_ring_size = ering->rx_pending;
10880         bp->tx_ring_size = ering->tx_pending;
10881
10882         if (netif_running(dev)) {
10883                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10884                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10885         }
10886
10887         return rc;
10888 }
10889
10890 static void bnx2x_get_pauseparam(struct net_device *dev,
10891                                  struct ethtool_pauseparam *epause)
10892 {
10893         struct bnx2x *bp = netdev_priv(dev);
10894
10895         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10896                            BNX2X_FLOW_CTRL_AUTO) &&
10897                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10898
10899         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10900                             BNX2X_FLOW_CTRL_RX);
10901         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10902                             BNX2X_FLOW_CTRL_TX);
10903
10904         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10905            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10906            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10907 }
10908
10909 static int bnx2x_set_pauseparam(struct net_device *dev,
10910                                 struct ethtool_pauseparam *epause)
10911 {
10912         struct bnx2x *bp = netdev_priv(dev);
10913
10914         if (IS_E1HMF(bp))
10915                 return 0;
10916
10917         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10918            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10919            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10920
10921         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10922
10923         if (epause->rx_pause)
10924                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10925
10926         if (epause->tx_pause)
10927                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10928
10929         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10930                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10931
10932         if (epause->autoneg) {
10933                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10934                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10935                         return -EINVAL;
10936                 }
10937
10938                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10939                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10940         }
10941
10942         DP(NETIF_MSG_LINK,
10943            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10944
10945         if (netif_running(dev)) {
10946                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10947                 bnx2x_link_set(bp);
10948         }
10949
10950         return 0;
10951 }
10952
10953 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10954 {
10955         struct bnx2x *bp = netdev_priv(dev);
10956         int changed = 0;
10957         int rc = 0;
10958
10959         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10960                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10961                 return -EAGAIN;
10962         }
10963
10964         /* TPA requires Rx CSUM offloading */
10965         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10966                 if (!disable_tpa) {
10967                         if (!(dev->features & NETIF_F_LRO)) {
10968                                 dev->features |= NETIF_F_LRO;
10969                                 bp->flags |= TPA_ENABLE_FLAG;
10970                                 changed = 1;
10971                         }
10972                 } else
10973                         rc = -EINVAL;
10974         } else if (dev->features & NETIF_F_LRO) {
10975                 dev->features &= ~NETIF_F_LRO;
10976                 bp->flags &= ~TPA_ENABLE_FLAG;
10977                 changed = 1;
10978         }
10979
10980         if (changed && netif_running(dev)) {
10981                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10982                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10983         }
10984
10985         return rc;
10986 }
10987
10988 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10989 {
10990         struct bnx2x *bp = netdev_priv(dev);
10991
10992         return bp->rx_csum;
10993 }
10994
10995 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10996 {
10997         struct bnx2x *bp = netdev_priv(dev);
10998         int rc = 0;
10999
11000         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11001                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11002                 return -EAGAIN;
11003         }
11004
11005         bp->rx_csum = data;
11006
11007         /* Disable TPA when Rx CSUM is disabled; otherwise all
11008            TPA'ed packets will be discarded due to a wrong TCP CSUM */
11009         if (!data) {
11010                 u32 flags = ethtool_op_get_flags(dev);
11011
11012                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11013         }
11014
11015         return rc;
11016 }
11017
11018 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11019 {
11020         if (data) {
11021                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11022                 dev->features |= NETIF_F_TSO6;
11023         } else {
11024                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11025                 dev->features &= ~NETIF_F_TSO6;
11026         }
11027
11028         return 0;
11029 }
11030
11031 static const struct {
11032         char string[ETH_GSTRING_LEN];
11033 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11034         { "register_test (offline)" },
11035         { "memory_test (offline)" },
11036         { "loopback_test (offline)" },
11037         { "nvram_test (online)" },
11038         { "interrupt_test (online)" },
11039         { "link_test (online)" },
11040         { "idle check (online)" }
11041 };
11042
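/* Register self-test: walk reg_tbl twice, writing 0x00000000 and then
 * 0xffffffff to each register (at offset0 + port*offset1), reading the
 * value back and comparing it under the entry's read/write mask.
 */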
11043 static int bnx2x_test_registers(struct bnx2x *bp)
11044 {
11045         int idx, i, rc = -ENODEV;
11046         u32 wr_val = 0;
11047         int port = BP_PORT(bp);
11048         static const struct {
11049                 u32 offset0;
11050                 u32 offset1;
11051                 u32 mask;
11052         } reg_tbl[] = {
11053 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11054                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11055                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11056                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11057                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11058                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11059                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11060                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11061                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11062                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11063 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11064                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11065                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11066                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11067                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11068                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11069                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11070                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11071                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11072                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11073 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11074                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11075                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11076                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11077                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11078                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11079                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11080                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11081                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11082                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11083 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11084                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11085                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11086                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11087                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11088                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11089                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11090
11091                 { 0xffffffff, 0, 0x00000000 }
11092         };
11093
11094         if (!netif_running(bp->dev))
11095                 return rc;
11096
11097         /* Run the test twice:
11098            first writing 0x00000000, then writing 0xffffffff */
11099         for (idx = 0; idx < 2; idx++) {
11100
11101                 switch (idx) {
11102                 case 0:
11103                         wr_val = 0;
11104                         break;
11105                 case 1:
11106                         wr_val = 0xffffffff;
11107                         break;
11108                 }
11109
11110                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11111                         u32 offset, mask, save_val, val;
11112
11113                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11114                         mask = reg_tbl[i].mask;
11115
11116                         save_val = REG_RD(bp, offset);
11117
11118                         REG_WR(bp, offset, wr_val);
11119                         val = REG_RD(bp, offset);
11120
11121                         /* Restore the original register's value */
11122                         REG_WR(bp, offset, save_val);
11123
11124                         /* verify value is as expected */
11125                         if ((val & mask) != (wr_val & mask)) {
11126                                 DP(NETIF_MSG_PROBE,
11127                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11128                                    offset, val, wr_val, mask);
11129                                 goto test_reg_exit;
11130                         }
11131                 }
11132         }
11133
11134         rc = 0;
11135
11136 test_reg_exit:
11137         return rc;
11138 }
11139
11140 static int bnx2x_test_memory(struct bnx2x *bp)
11141 {
11142         int i, j, rc = -ENODEV;
11143         u32 val;
11144         static const struct {
11145                 u32 offset;
11146                 int size;
11147         } mem_tbl[] = {
11148                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11149                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11150                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11151                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11152                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11153                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11154                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11155
11156                 { 0xffffffff, 0 }
11157         };
11158         static const struct {
11159                 char *name;
11160                 u32 offset;
11161                 u32 e1_mask;
11162                 u32 e1h_mask;
11163         } prty_tbl[] = {
11164                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11165                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11166                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11167                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11168                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11169                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11170
11171                 { NULL, 0xffffffff, 0, 0 }
11172         };
11173
11174         if (!netif_running(bp->dev))
11175                 return rc;
11176
11177         /* Go through all the memories */
11178         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11179                 for (j = 0; j < mem_tbl[i].size; j++)
11180                         REG_RD(bp, mem_tbl[i].offset + j*4);
11181
11182         /* Check the parity status */
11183         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11184                 val = REG_RD(bp, prty_tbl[i].offset);
11185                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11186                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11187                         DP(NETIF_MSG_HW,
11188                            "%s is 0x%x\n", prty_tbl[i].name, val);
11189                         goto test_mem_exit;
11190                 }
11191         }
11192
11193         rc = 0;
11194
11195 test_mem_exit:
11196         return rc;
11197 }
11198
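/* Poll bnx2x_link_test() for up to ~10 seconds (1000 x 10 ms) for the
 * link to come back after a self-test reload.
 */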
11199 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11200 {
11201         int cnt = 1000;
11202
11203         if (link_up)
11204                 while (bnx2x_link_test(bp) && cnt--)
11205                         msleep(10);
11206 }
11207
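/* Single-packet loopback: build a frame (dst = our own MAC, zeroed src,
 * 0x77 filler in the rest of the header and an "i & 0xff" byte pattern
 * as payload), post it as one start BD + parse BD, ring the doorbell,
 * then verify that exactly one packet completed on TX and arrived on RX
 * with a clean CQE, the expected length and an intact payload pattern.
 */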
11208 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11209 {
11210         unsigned int pkt_size, num_pkts, i;
11211         struct sk_buff *skb;
11212         unsigned char *packet;
11213         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11214         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11215         u16 tx_start_idx, tx_idx;
11216         u16 rx_start_idx, rx_idx;
11217         u16 pkt_prod, bd_prod;
11218         struct sw_tx_bd *tx_buf;
11219         struct eth_tx_start_bd *tx_start_bd;
11220         struct eth_tx_parse_bd *pbd = NULL;
11221         dma_addr_t mapping;
11222         union eth_rx_cqe *cqe;
11223         u8 cqe_fp_flags;
11224         struct sw_rx_bd *rx_buf;
11225         u16 len;
11226         int rc = -ENODEV;
11227
11228         /* check the loopback mode */
11229         switch (loopback_mode) {
11230         case BNX2X_PHY_LOOPBACK:
11231                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11232                         return -EINVAL;
11233                 break;
11234         case BNX2X_MAC_LOOPBACK:
11235                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11236                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11237                 break;
11238         default:
11239                 return -EINVAL;
11240         }
11241
11242         /* prepare the loopback packet */
11243         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11244                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11245         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11246         if (!skb) {
11247                 rc = -ENOMEM;
11248                 goto test_loopback_exit;
11249         }
11250         packet = skb_put(skb, pkt_size);
11251         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11252         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11253         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11254         for (i = ETH_HLEN; i < pkt_size; i++)
11255                 packet[i] = (unsigned char) (i & 0xff);
11256
11257         /* send the loopback packet */
11258         num_pkts = 0;
11259         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11260         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11261
11262         pkt_prod = fp_tx->tx_pkt_prod++;
11263         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11264         tx_buf->first_bd = fp_tx->tx_bd_prod;
11265         tx_buf->skb = skb;
11266         tx_buf->flags = 0;
11267
11268         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11269         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11270         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11271                                  skb_headlen(skb), DMA_TO_DEVICE);
11272         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11273         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11274         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11275         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11276         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11277         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11278         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11279                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11280
11281         /* turn on parsing and get a BD */
11282         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11283         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11284
11285         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11286
11287         wmb();
11288
11289         fp_tx->tx_db.data.prod += 2;
11290         barrier();
11291         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11292
11293         mmiowb();
11294
11295         num_pkts++;
11296         fp_tx->tx_bd_prod += 2; /* start + pbd */
11297
11298         udelay(100);
11299
11300         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11301         if (tx_idx != tx_start_idx + num_pkts)
11302                 goto test_loopback_exit;
11303
11304         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11305         if (rx_idx != rx_start_idx + num_pkts)
11306                 goto test_loopback_exit;
11307
11308         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11309         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11310         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11311                 goto test_loopback_rx_exit;
11312
11313         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11314         if (len != pkt_size)
11315                 goto test_loopback_rx_exit;
11316
11317         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11318         skb = rx_buf->skb;
11319         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11320         for (i = ETH_HLEN; i < pkt_size; i++)
11321                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11322                         goto test_loopback_rx_exit;
11323
11324         rc = 0;
11325
11326 test_loopback_rx_exit:
11327
11328         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11329         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11330         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11331         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11332
11333         /* Update producers */
11334         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11335                              fp_rx->rx_sge_prod);
11336
11337 test_loopback_exit:
11338         bp->link_params.loopback_mode = LOOPBACK_NONE;
11339
11340         return rc;
11341 }
11342
11343 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11344 {
11345         int rc = 0, res;
11346
11347         if (BP_NOMCP(bp))
11348                 return rc;
11349
11350         if (!netif_running(bp->dev))
11351                 return BNX2X_LOOPBACK_FAILED;
11352
11353         bnx2x_netif_stop(bp, 1);
11354         bnx2x_acquire_phy_lock(bp);
11355
11356         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11357         if (res) {
11358                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11359                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11360         }
11361
11362         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11363         if (res) {
11364                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11365                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11366         }
11367
11368         bnx2x_release_phy_lock(bp);
11369         bnx2x_netif_start(bp);
11370
11371         return rc;
11372 }
11373
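/* ether_crc_le() applies no final inversion, so running it over a block
 * that carries its own little-endian CRC32 leaves this well-known
 * residual whenever the block is intact.
 */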
11374 #define CRC32_RESIDUAL                  0xdebb20e3
11375
11376 static int bnx2x_test_nvram(struct bnx2x *bp)
11377 {
11378         static const struct {
11379                 int offset;
11380                 int size;
11381         } nvram_tbl[] = {
11382                 {     0,  0x14 }, /* bootstrap */
11383                 {  0x14,  0xec }, /* dir */
11384                 { 0x100, 0x350 }, /* manuf_info */
11385                 { 0x450,  0xf0 }, /* feature_info */
11386                 { 0x640,  0x64 }, /* upgrade_key_info */
11387                 { 0x6a4,  0x64 },
11388                 { 0x708,  0x70 }, /* manuf_key_info */
11389                 { 0x778,  0x70 },
11390                 {     0,     0 }
11391         };
11392         __be32 buf[0x350 / 4];
11393         u8 *data = (u8 *)buf;
11394         int i, rc;
11395         u32 magic, crc;
11396
11397         if (BP_NOMCP(bp))
11398                 return 0;
11399
11400         rc = bnx2x_nvram_read(bp, 0, data, 4);
11401         if (rc) {
11402                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11403                 goto test_nvram_exit;
11404         }
11405
11406         magic = be32_to_cpu(buf[0]);
11407         if (magic != 0x669955aa) {
11408                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11409                 rc = -ENODEV;
11410                 goto test_nvram_exit;
11411         }
11412
11413         for (i = 0; nvram_tbl[i].size; i++) {
11414
11415                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11416                                       nvram_tbl[i].size);
11417                 if (rc) {
11418                         DP(NETIF_MSG_PROBE,
11419                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11420                         goto test_nvram_exit;
11421                 }
11422
11423                 crc = ether_crc_le(nvram_tbl[i].size, data);
11424                 if (crc != CRC32_RESIDUAL) {
11425                         DP(NETIF_MSG_PROBE,
11426                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11427                         rc = -ENODEV;
11428                         goto test_nvram_exit;
11429                 }
11430         }
11431
11432 test_nvram_exit:
11433         return rc;
11434 }
11435
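/* Interrupt self-test: post an empty SET_MAC ramrod (hdr.length = 0) on
 * the slowpath and poll up to 100 ms (10 x 10 ms) for its completion to
 * confirm that slowpath interrupts are being delivered.
 */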
11436 static int bnx2x_test_intr(struct bnx2x *bp)
11437 {
11438         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11439         int i, rc;
11440
11441         if (!netif_running(bp->dev))
11442                 return -ENODEV;
11443
11444         config->hdr.length = 0;
11445         if (CHIP_IS_E1(bp))
11446                 /* use last unicast entries */
11447                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11448         else
11449                 config->hdr.offset = BP_FUNC(bp);
11450         config->hdr.client_id = bp->fp->cl_id;
11451         config->hdr.reserved1 = 0;
11452
11453         bp->set_mac_pending++;
11454         smp_wmb();
11455         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11456                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11457                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11458         if (rc == 0) {
11459                 for (i = 0; i < 10; i++) {
11460                         if (!bp->set_mac_pending)
11461                                 break;
11462                         smp_rmb();
11463                         msleep_interruptible(10);
11464                 }
11465                 if (i == 10)
11466                         rc = -ENODEV;
11467         }
11468
11469         return rc;
11470 }
11471
11472 static void bnx2x_self_test(struct net_device *dev,
11473                             struct ethtool_test *etest, u64 *buf)
11474 {
11475         struct bnx2x *bp = netdev_priv(dev);
11476
11477         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11478                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11479                 etest->flags |= ETH_TEST_FL_FAILED;
11480                 return;
11481         }
11482
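        /* buf[] slots follow bnx2x_tests_str_arr: 0 register, 1 memory,
         * 2 loopback, 3 nvram, 4 interrupt, 5 link, 6 idle check */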
11483         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11484
11485         if (!netif_running(dev))
11486                 return;
11487
11488         /* offline tests are not supported in MF mode */
11489         if (IS_E1HMF(bp))
11490                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11491
11492         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11493                 int port = BP_PORT(bp);
11494                 u32 val;
11495                 u8 link_up;
11496
11497                 /* save current value of input enable for TX port IF */
11498                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11499                 /* disable input for TX port IF */
11500                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11501
11502                 link_up = (bnx2x_link_test(bp) == 0);
11503                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11504                 bnx2x_nic_load(bp, LOAD_DIAG);
11505                 /* wait until link state is restored */
11506                 bnx2x_wait_for_link(bp, link_up);
11507
11508                 if (bnx2x_test_registers(bp) != 0) {
11509                         buf[0] = 1;
11510                         etest->flags |= ETH_TEST_FL_FAILED;
11511                 }
11512                 if (bnx2x_test_memory(bp) != 0) {
11513                         buf[1] = 1;
11514                         etest->flags |= ETH_TEST_FL_FAILED;
11515                 }
11516                 buf[2] = bnx2x_test_loopback(bp, link_up);
11517                 if (buf[2] != 0)
11518                         etest->flags |= ETH_TEST_FL_FAILED;
11519
11520                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11521
11522                 /* restore input for TX port IF */
11523                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11524
11525                 bnx2x_nic_load(bp, LOAD_NORMAL);
11526                 /* wait until link state is restored */
11527                 bnx2x_wait_for_link(bp, link_up);
11528         }
11529         if (bnx2x_test_nvram(bp) != 0) {
11530                 buf[3] = 1;
11531                 etest->flags |= ETH_TEST_FL_FAILED;
11532         }
11533         if (bnx2x_test_intr(bp) != 0) {
11534                 buf[4] = 1;
11535                 etest->flags |= ETH_TEST_FL_FAILED;
11536         }
11537         if (bp->port.pmf && (bnx2x_link_test(bp) != 0)) {
11538                 buf[5] = 1;
11539                 etest->flags |= ETH_TEST_FL_FAILED;
11540         }
11542
11543 #ifdef BNX2X_EXTRA_DEBUG
11544         bnx2x_panic_dump(bp);
11545 #endif
11546 }
11547
11548 static const struct {
11549         long offset;
11550         int size;
11551         u8 string[ETH_GSTRING_LEN];
11552 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11553 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11554         { Q_STATS_OFFSET32(error_bytes_received_hi),
11555                                                 8, "[%d]: rx_error_bytes" },
11556         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11557                                                 8, "[%d]: rx_ucast_packets" },
11558         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11559                                                 8, "[%d]: rx_mcast_packets" },
11560         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11561                                                 8, "[%d]: rx_bcast_packets" },
11562         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11563         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11564                                          4, "[%d]: rx_phy_ip_err_discards"},
11565         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11566                                          4, "[%d]: rx_skb_alloc_discard" },
11567         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11568
11569 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11570         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11571                                                 8, "[%d]: tx_ucast_packets" },
11572         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11573                                                 8, "[%d]: tx_mcast_packets" },
11574         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11575                                                 8, "[%d]: tx_bcast_packets" }
11576 };
11577
11578 static const struct {
11579         long offset;
11580         int size;
11581         u32 flags;
11582 #define STATS_FLAGS_PORT                1
11583 #define STATS_FLAGS_FUNC                2
11584 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11585         u8 string[ETH_GSTRING_LEN];
11586 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11587 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11588                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11589         { STATS_OFFSET32(error_bytes_received_hi),
11590                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11591         { STATS_OFFSET32(total_unicast_packets_received_hi),
11592                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11593         { STATS_OFFSET32(total_multicast_packets_received_hi),
11594                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11595         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11596                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11597         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11598                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11599         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11600                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11601         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11602                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11603         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11604                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11605 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11606                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11607         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11608                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11609         { STATS_OFFSET32(no_buff_discard_hi),
11610                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11611         { STATS_OFFSET32(mac_filter_discard),
11612                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11613         { STATS_OFFSET32(xxoverflow_discard),
11614                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11615         { STATS_OFFSET32(brb_drop_hi),
11616                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11617         { STATS_OFFSET32(brb_truncate_hi),
11618                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11619         { STATS_OFFSET32(pause_frames_received_hi),
11620                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11621         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11622                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11623         { STATS_OFFSET32(nig_timer_max),
11624                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11625 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11626                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11627         { STATS_OFFSET32(rx_skb_alloc_failed),
11628                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11629         { STATS_OFFSET32(hw_csum_err),
11630                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11631
11632         { STATS_OFFSET32(total_bytes_transmitted_hi),
11633                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11634         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11635                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11636         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11637                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11638         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11639                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11640         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11641                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11642         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11643                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11644         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11645                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11646 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11647                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11648         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11649                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11650         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11651                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11652         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11653                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11654         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11655                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11656         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11657                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11658         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11659                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11660         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11661                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11662         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11663                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11664         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11665                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11666 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11667                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11668         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11669                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11670         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11671                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11672         { STATS_OFFSET32(pause_frames_sent_hi),
11673                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11674 };
11675
11676 #define IS_PORT_STAT(i) \
11677         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11678 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11679 #define IS_E1HMF_MODE_STAT(bp) \
11680                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11681
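/* In E1H multi-function mode the per-port MAC statistics are shared
 * between functions, so they are hidden from "ethtool -S" unless the
 * BNX2X_MSG_STATS debug level is enabled.
 */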
11682 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11683 {
11684         struct bnx2x *bp = netdev_priv(dev);
11685         int i, num_stats;
11686
11687         switch (stringset) {
11688         case ETH_SS_STATS:
11689                 if (is_multi(bp)) {
11690                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11691                         if (!IS_E1HMF_MODE_STAT(bp))
11692                                 num_stats += BNX2X_NUM_STATS;
11693                 } else {
11694                         if (IS_E1HMF_MODE_STAT(bp)) {
11695                                 num_stats = 0;
11696                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11697                                         if (IS_FUNC_STAT(i))
11698                                                 num_stats++;
11699                         } else
11700                                 num_stats = BNX2X_NUM_STATS;
11701                 }
11702                 return num_stats;
11703
11704         case ETH_SS_TEST:
11705                 return BNX2X_NUM_TESTS;
11706
11707         default:
11708                 return -EINVAL;
11709         }
11710 }
11711
11712 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11713 {
11714         struct bnx2x *bp = netdev_priv(dev);
11715         int i, j, k;
11716
11717         switch (stringset) {
11718         case ETH_SS_STATS:
11719                 if (is_multi(bp)) {
11720                         k = 0;
11721                         for_each_queue(bp, i) {
11722                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11723                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11724                                                 bnx2x_q_stats_arr[j].string, i);
11725                                 k += BNX2X_NUM_Q_STATS;
11726                         }
11727                         if (IS_E1HMF_MODE_STAT(bp))
11728                                 break;
11729                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11730                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11731                                        bnx2x_stats_arr[j].string);
11732                 } else {
11733                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11734                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11735                                         continue;
11736                                 strcpy(buf + j*ETH_GSTRING_LEN,
11737                                        bnx2x_stats_arr[i].string);
11738                                 j++;
11739                         }
11740                 }
11741                 break;
11742
11743         case ETH_SS_TEST:
11744                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11745                 break;
11746         }
11747 }
11748
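/* Statistics live in arrays of u32: an 8-byte counter occupies two
 * consecutive words (hi, lo) folded together via HILO_U64, a 4-byte
 * counter is simply zero-extended, and size 0 marks a counter to skip.
 */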
11749 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11750                                     struct ethtool_stats *stats, u64 *buf)
11751 {
11752         struct bnx2x *bp = netdev_priv(dev);
11753         u32 *hw_stats, *offset;
11754         int i, j, k;
11755
11756         if (is_multi(bp)) {
11757                 k = 0;
11758                 for_each_queue(bp, i) {
11759                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11760                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11761                                 if (bnx2x_q_stats_arr[j].size == 0) {
11762                                         /* skip this counter */
11763                                         buf[k + j] = 0;
11764                                         continue;
11765                                 }
11766                                 offset = (hw_stats +
11767                                           bnx2x_q_stats_arr[j].offset);
11768                                 if (bnx2x_q_stats_arr[j].size == 4) {
11769                                         /* 4-byte counter */
11770                                         buf[k + j] = (u64) *offset;
11771                                         continue;
11772                                 }
11773                                 /* 8-byte counter */
11774                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11775                         }
11776                         k += BNX2X_NUM_Q_STATS;
11777                 }
11778                 if (IS_E1HMF_MODE_STAT(bp))
11779                         return;
11780                 hw_stats = (u32 *)&bp->eth_stats;
11781                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11782                         if (bnx2x_stats_arr[j].size == 0) {
11783                                 /* skip this counter */
11784                                 buf[k + j] = 0;
11785                                 continue;
11786                         }
11787                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11788                         if (bnx2x_stats_arr[j].size == 4) {
11789                                 /* 4-byte counter */
11790                                 buf[k + j] = (u64) *offset;
11791                                 continue;
11792                         }
11793                         /* 8-byte counter */
11794                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11795                 }
11796         } else {
11797                 hw_stats = (u32 *)&bp->eth_stats;
11798                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11799                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11800                                 continue;
11801                         if (bnx2x_stats_arr[i].size == 0) {
11802                                 /* skip this counter */
11803                                 buf[j] = 0;
11804                                 j++;
11805                                 continue;
11806                         }
11807                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11808                         if (bnx2x_stats_arr[i].size == 4) {
11809                                 /* 4-byte counter */
11810                                 buf[j] = (u64) *offset;
11811                                 j++;
11812                                 continue;
11813                         }
11814                         /* 8-byte counter */
11815                         buf[j] = HILO_U64(*offset, *(offset + 1));
11816                         j++;
11817                 }
11818         }
11819 }
11820
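/* Blink the port LED for "data" seconds (500 ms on / 500 ms off per
 * cycle, 0 meaning 2 seconds), then restore the LED to match the
 * current link state.
 */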
11821 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11822 {
11823         struct bnx2x *bp = netdev_priv(dev);
11824         int i;
11825
11826         if (!netif_running(dev))
11827                 return 0;
11828
11829         if (!bp->port.pmf)
11830                 return 0;
11831
11832         if (data == 0)
11833                 data = 2;
11834
11835         for (i = 0; i < (data * 2); i++) {
11836                 if ((i % 2) == 0)
11837                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11838                                       SPEED_1000);
11839                 else
11840                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11841
11842                 msleep_interruptible(500);
11843                 if (signal_pending(current))
11844                         break;
11845         }
11846
11847         if (bp->link_vars.link_up)
11848                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11849                               bp->link_vars.line_speed);
11850
11851         return 0;
11852 }
11853
11854 static const struct ethtool_ops bnx2x_ethtool_ops = {
11855         .get_settings           = bnx2x_get_settings,
11856         .set_settings           = bnx2x_set_settings,
11857         .get_drvinfo            = bnx2x_get_drvinfo,
11858         .get_regs_len           = bnx2x_get_regs_len,
11859         .get_regs               = bnx2x_get_regs,
11860         .get_wol                = bnx2x_get_wol,
11861         .set_wol                = bnx2x_set_wol,
11862         .get_msglevel           = bnx2x_get_msglevel,
11863         .set_msglevel           = bnx2x_set_msglevel,
11864         .nway_reset             = bnx2x_nway_reset,
11865         .get_link               = bnx2x_get_link,
11866         .get_eeprom_len         = bnx2x_get_eeprom_len,
11867         .get_eeprom             = bnx2x_get_eeprom,
11868         .set_eeprom             = bnx2x_set_eeprom,
11869         .get_coalesce           = bnx2x_get_coalesce,
11870         .set_coalesce           = bnx2x_set_coalesce,
11871         .get_ringparam          = bnx2x_get_ringparam,
11872         .set_ringparam          = bnx2x_set_ringparam,
11873         .get_pauseparam         = bnx2x_get_pauseparam,
11874         .set_pauseparam         = bnx2x_set_pauseparam,
11875         .get_rx_csum            = bnx2x_get_rx_csum,
11876         .set_rx_csum            = bnx2x_set_rx_csum,
11877         .get_tx_csum            = ethtool_op_get_tx_csum,
11878         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11879         .set_flags              = bnx2x_set_flags,
11880         .get_flags              = ethtool_op_get_flags,
11881         .get_sg                 = ethtool_op_get_sg,
11882         .set_sg                 = ethtool_op_set_sg,
11883         .get_tso                = ethtool_op_get_tso,
11884         .set_tso                = bnx2x_set_tso,
11885         .self_test              = bnx2x_self_test,
11886         .get_sset_count         = bnx2x_get_sset_count,
11887         .get_strings            = bnx2x_get_strings,
11888         .phys_id                = bnx2x_phys_id,
11889         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11890 };
11891
11892 /* end of ethtool_ops */
11893
11894 /****************************************************************************
11895 * General service functions
11896 ****************************************************************************/
11897
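/* Flip the device between D0 and D3hot by rewriting the PMCSR
 * power-state field directly; PME generation is armed on the way down
 * only when WoL is enabled.
 */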
11898 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11899 {
11900         u16 pmcsr;
11901
11902         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11903
11904         switch (state) {
11905         case PCI_D0:
11906                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11907                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11908                                        PCI_PM_CTRL_PME_STATUS));
11909
11910                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11911                         /* delay required during transition out of D3hot */
11912                         msleep(20);
11913                 break;
11914
11915         case PCI_D3hot:
11916                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11917                 pmcsr |= 3;
11918
11919                 if (bp->wol)
11920                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11921
11922                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11923                                       pmcsr);
11924
11925                 /* No more memory access after this point until
11926                  * the device is brought back to D0.
11927                  */
11928                 break;
11929
11930         default:
11931                 return -EINVAL;
11932         }
11933         return 0;
11934 }
11935
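/* The last descriptor of each RCQ page is a "next page" pointer rather
 * than a real completion, so a status-block consumer that lands on it
 * is advanced by one before being compared with our local consumer.
 */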
11936 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11937 {
11938         u16 rx_cons_sb;
11939
11940         /* Tell compiler that status block fields can change */
11941         barrier();
11942         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11943         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11944                 rx_cons_sb++;
11945         return (fp->rx_comp_cons != rx_cons_sb);
11946 }
11947
11948 /*
11949  * net_device service functions
11950  */
11951
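/* NAPI poll: service TX completions first, then RX up to the budget;
 * once no work remains, re-read the fastpath status block indices,
 * re-check under an rmb() and only then complete NAPI and re-enable
 * the IGU interrupt (CSTORM NOP ack + USTORM enable ack).
 */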
11952 static int bnx2x_poll(struct napi_struct *napi, int budget)
11953 {
11954         int work_done = 0;
11955         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11956                                                  napi);
11957         struct bnx2x *bp = fp->bp;
11958
11959         while (1) {
11960 #ifdef BNX2X_STOP_ON_ERROR
11961                 if (unlikely(bp->panic)) {
11962                         napi_complete(napi);
11963                         return 0;
11964                 }
11965 #endif
11966
11967                 if (bnx2x_has_tx_work(fp))
11968                         bnx2x_tx_int(fp);
11969
11970                 if (bnx2x_has_rx_work(fp)) {
11971                         work_done += bnx2x_rx_int(fp, budget - work_done);
11972
11973                         /* must not complete if we consumed full budget */
11974                         if (work_done >= budget)
11975                                 break;
11976                 }
11977
11978                 /* Fall out from the NAPI loop if needed */
11979                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11980                         bnx2x_update_fpsb_idx(fp);
11981                         /* bnx2x_has_rx_work() reads the status block, so
11982                          * the indices must actually be read
11983                          * (bnx2x_update_fpsb_idx) before this check
11984                          * (bnx2x_has_rx_work), or we could write a
11985                          * "newer" status block value to the IGU: with a
11986                          * DMA right after bnx2x_has_rx_work and no rmb,
11987                          * the read (bnx2x_update_fpsb_idx) may be delayed
11988                          * to just before bnx2x_ack_sb, and no further
11989                          * interrupt would arrive until the next status
11990                          * block update, while work is still unhandled.
11991                          */
11992                         rmb();
11993
11994                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11995                                 napi_complete(napi);
11996                                 /* Re-enable interrupts */
11997                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11998                                              le16_to_cpu(fp->fp_c_idx),
11999                                              IGU_INT_NOP, 1);
12000                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12001                                              le16_to_cpu(fp->fp_u_idx),
12002                                              IGU_INT_ENABLE, 1);
12003                                 break;
12004                         }
12005                 }
12006         }
12007
12008         return work_done;
12009 }
12010
12011
12012 /* We split the first BD into header and data BDs
12013  * to ease the pain of our fellow microcode engineers;
12014  * we use one mapping for both BDs.
12015  * So far this has only been observed to happen
12016  * in Other Operating Systems(TM).
12017  */
12018 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12019                                    struct bnx2x_fastpath *fp,
12020                                    struct sw_tx_bd *tx_buf,
12021                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12022                                    u16 bd_prod, int nbd)
12023 {
12024         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12025         struct eth_tx_bd *d_tx_bd;
12026         dma_addr_t mapping;
12027         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12028
12029         /* first fix first BD */
12030         h_tx_bd->nbd = cpu_to_le16(nbd);
12031         h_tx_bd->nbytes = cpu_to_le16(hlen);
12032
12033         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12034            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12035            h_tx_bd->addr_lo, h_tx_bd->nbd);
12036
12037         /* now get a new data BD
12038          * (after the pbd) and fill it */
12039         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12040         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12041
12042         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12043                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12044
12045         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12046         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12047         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12048
12049         /* this marks the BD as one that has no individual mapping */
12050         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12051
12052         DP(NETIF_MSG_TX_QUEUED,
12053            "TSO split data size is %d (%x:%x)\n",
12054            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12055
12056         /* update tx_bd */
12057         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12058
12059         return bd_prod;
12060 }
12061
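/* The checksum the stack computed may cover a range that starts "fix"
 * bytes away from the transport header the HW parser will use: for
 * fix > 0 the sum over the extra leading bytes is folded back out, for
 * fix < 0 the missing bytes are folded in, and the result is
 * byte-swapped for the parsing BD.
 */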
12062 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12063 {
12064         if (fix > 0)
12065                 csum = (u16) ~csum_fold(csum_sub(csum,
12066                                 csum_partial(t_header - fix, fix, 0)));
12067
12068         else if (fix < 0)
12069                 csum = (u16) ~csum_fold(csum_add(csum,
12070                                 csum_partial(t_header, -fix, 0)));
12071
12072         return swab16(csum);
12073 }
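
/* Note on the fixup above: the stack's partial checksum starts at
 * skb->csum_start, which (per SKB_CS_OFF) may lie `fix` bytes before
 * (fix > 0) or after (fix < 0) the transport header; the helper
 * subtracts or adds the checksum of those bytes so the result covers
 * exactly the transport header onwards, then folds, complements and
 * byte-swaps it into the form the parsing BD expects.
 */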
12074
12075 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12076 {
12077         u32 rc;
12078
12079         if (skb->ip_summed != CHECKSUM_PARTIAL)
12080                 rc = XMIT_PLAIN;
12081
12082         else {
12083                 if (skb->protocol == htons(ETH_P_IPV6)) {
12084                         rc = XMIT_CSUM_V6;
12085                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12086                                 rc |= XMIT_CSUM_TCP;
12087
12088                 } else {
12089                         rc = XMIT_CSUM_V4;
12090                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12091                                 rc |= XMIT_CSUM_TCP;
12092                 }
12093         }
12094
12095         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12096                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12097
12098         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12099                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12100
12101         return rc;
12102 }
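
/* For example, a CHECKSUM_PARTIAL IPv4 TCP skb with SKB_GSO_TCPV4 set
 * maps to (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP), while an skb
 * with ip_summed != CHECKSUM_PARTIAL and no GSO maps to XMIT_PLAIN.
 */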
12103
12104 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12105 /* Check whether the packet requires linearization (i.e. it is too
12106    fragmented). There is no need to check fragmentation if the page
12107    size is above 8K (the FW restrictions cannot be violated then). */
12108 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12109                              u32 xmit_type)
12110 {
12111         int to_copy = 0;
12112         int hlen = 0;
12113         int first_bd_sz = 0;
12114
12115         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12116         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12117
12118                 if (xmit_type & XMIT_GSO) {
12119                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12120                         /* Check if LSO packet needs to be copied:
12121                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12122                         int wnd_size = MAX_FETCH_BD - 3;
12123                         /* Number of windows to check */
12124                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12125                         int wnd_idx = 0;
12126                         int frag_idx = 0;
12127                         u32 wnd_sum = 0;
12128
12129                         /* Headers length */
12130                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12131                                 tcp_hdrlen(skb);
12132
12133                         /* Amount of data (w/o headers) in the linear part of the SKB */
12134                         first_bd_sz = skb_headlen(skb) - hlen;
12135
12136                         wnd_sum  = first_bd_sz;
12137
12138                         /* Calculate the first sum - it's special */
12139                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12140                                 wnd_sum +=
12141                                         skb_shinfo(skb)->frags[frag_idx].size;
12142
12143                         /* If there is data in the linear part of the skb - check it */
12144                         if (first_bd_sz > 0) {
12145                                 if (unlikely(wnd_sum < lso_mss)) {
12146                                         to_copy = 1;
12147                                         goto exit_lbl;
12148                                 }
12149
12150                                 wnd_sum -= first_bd_sz;
12151                         }
12152
12153                         /* Others are easier: run through the frag list and
12154                            check all windows */
12155                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12156                                 wnd_sum +=
12157                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12158
12159                                 if (unlikely(wnd_sum < lso_mss)) {
12160                                         to_copy = 1;
12161                                         break;
12162                                 }
12163                                 wnd_sum -=
12164                                         skb_shinfo(skb)->frags[wnd_idx].size;
12165                         }
12166                 } else {
12167                         /* a non-LSO packet that is too fragmented must
12168                            always be linearized */
12169                         to_copy = 1;
12170                 }
12171         }
12172
12173 exit_lbl:
12174         if (unlikely(to_copy))
12175                 DP(NETIF_MSG_TX_QUEUED,
12176                    "Linearization IS REQUIRED for %s packet. "
12177                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12178                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12179                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12180
12181         return to_copy;
12182 }
12183 #endif
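
/* A minimal standalone sketch of the sliding-window rule enforced by
 * bnx2x_pkt_req_lin() above (illustrative only, compiled out; the
 * function name and the sizes below are hypothetical):
 */
#if 0
/* Return true if any window of wnd_size consecutive buffers covers
 * fewer than lso_mss bytes, i.e. the packet would have to be
 * linearized. E.g. sizes {100, 100, 100}, wnd_size 3, lso_mss 400
 * gives a window sum of 300 < 400 -> linearize.
 */
static bool bnx2x_window_too_small(const int *sz, int n,
                                   int wnd_size, int lso_mss)
{
        int sum = 0, i;

        for (i = 0; i < n; i++) {
                sum += sz[i];
                if (i >= wnd_size - 1) {
                        if (sum < lso_mss)
                                return true;
                        /* slide the window: drop the oldest buffer */
                        sum -= sz[i - wnd_size + 1];
                }
        }
        return false;
}
#endif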
12184
12185 /* called with netif_tx_lock
12186  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12187  * netif_wake_queue()
12188  */
12189 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12190 {
12191         struct bnx2x *bp = netdev_priv(dev);
12192         struct bnx2x_fastpath *fp;
12193         struct netdev_queue *txq;
12194         struct sw_tx_bd *tx_buf;
12195         struct eth_tx_start_bd *tx_start_bd;
12196         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12197         struct eth_tx_parse_bd *pbd = NULL;
12198         u16 pkt_prod, bd_prod;
12199         int nbd, fp_index;
12200         dma_addr_t mapping;
12201         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12202         int i;
12203         u8 hlen = 0;
12204         __le16 pkt_size = 0;
12205         struct ethhdr *eth;
12206         u8 mac_type = UNICAST_ADDRESS;
12207
12208 #ifdef BNX2X_STOP_ON_ERROR
12209         if (unlikely(bp->panic))
12210                 return NETDEV_TX_BUSY;
12211 #endif
12212
12213         fp_index = skb_get_queue_mapping(skb);
12214         txq = netdev_get_tx_queue(dev, fp_index);
12215
12216         fp = &bp->fp[fp_index];
12217
12218         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12219                 fp->eth_q_stats.driver_xoff++;
12220                 netif_tx_stop_queue(txq);
12221                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12222                 return NETDEV_TX_BUSY;
12223         }
12224
12225         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12226            "  gso type %x  xmit_type %x\n",
12227            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12228            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12229
12230         eth = (struct ethhdr *)skb->data;
12231
12232         /* set the flag according to packet type (UNICAST_ADDRESS is the default) */
12233         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12234                 if (is_broadcast_ether_addr(eth->h_dest))
12235                         mac_type = BROADCAST_ADDRESS;
12236                 else
12237                         mac_type = MULTICAST_ADDRESS;
12238         }
12239
12240 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12241         /* First, check if we need to linearize the skb (due to FW
12242            restrictions). No need to check fragmentation if page size > 8K
12243            (there will be no violation to FW restrictions) */
12244         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12245                 /* Statistics of linearization */
12246                 bp->lin_cnt++;
12247                 if (skb_linearize(skb) != 0) {
12248                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12249                            "silently dropping this SKB\n");
12250                         dev_kfree_skb_any(skb);
12251                         return NETDEV_TX_OK;
12252                 }
12253         }
12254 #endif
12255
12256         /*
12257         Please read carefully. First we use one BD which we mark as the
12258         start BD, then we have a parsing info BD (used for TSO or csum),
12259         and only then we have the rest of the TSO BDs.
12260         (Don't forget to mark the last one as last,
12261         and to unmap only AFTER you write to the BD ...)
12262         And above all, all PBD sizes are in words - NOT DWORDS!
12263         */
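
        /* Rough shape of the BD chain built below (illustrative):
         *
         *   start BD (flags, nbd, headers+data or headers only)
         *     -> parsing BD (csum/TSO info)
         *     -> [split-off data BD when TSO splits the first BD]
         *     -> one data BD per fragment
         *
         * total_pkt_bytes is stored in the first fragment's BD, and
         * nbd counts every BD the FW must fetch.
         */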
12264
12265         pkt_prod = fp->tx_pkt_prod++;
12266         bd_prod = TX_BD(fp->tx_bd_prod);
12267
12268         /* get a tx_buf and first BD */
12269         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12270         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12271
12272         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12273         tx_start_bd->general_data =  (mac_type <<
12274                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12275         /* header nbd */
12276         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12277
12278         /* remember the first BD of the packet */
12279         tx_buf->first_bd = fp->tx_bd_prod;
12280         tx_buf->skb = skb;
12281         tx_buf->flags = 0;
12282
12283         DP(NETIF_MSG_TX_QUEUED,
12284            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12285            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12286
12287 #ifdef BCM_VLAN
12288         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12289             (bp->flags & HW_VLAN_TX_FLAG)) {
12290                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12291                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12292         } else
12293 #endif
12294                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12295
12296         /* turn on parsing and get a BD */
12297         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12298         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12299
12300         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12301
12302         if (xmit_type & XMIT_CSUM) {
12303                 hlen = (skb_network_header(skb) - skb->data) / 2;
12304
12305                 /* for now NS flag is not used in Linux */
12306                 pbd->global_data =
12307                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12308                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12309
12310                 pbd->ip_hlen = (skb_transport_header(skb) -
12311                                 skb_network_header(skb)) / 2;
12312
12313                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12314
12315                 pbd->total_hlen = cpu_to_le16(hlen);
12316                 hlen = hlen*2;
12317
12318                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12319
12320                 if (xmit_type & XMIT_CSUM_V4)
12321                         tx_start_bd->bd_flags.as_bitfield |=
12322                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12323                 else
12324                         tx_start_bd->bd_flags.as_bitfield |=
12325                                                 ETH_TX_BD_FLAGS_IPV6;
12326
12327                 if (xmit_type & XMIT_CSUM_TCP) {
12328                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12329
12330                 } else {
12331                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12332
12333                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12334
12335                         DP(NETIF_MSG_TX_QUEUED,
12336                            "hlen %d  fix %d  csum before fix %x\n",
12337                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12338
12339                         /* HW bug: fixup the CSUM */
12340                         pbd->tcp_pseudo_csum =
12341                                 bnx2x_csum_fix(skb_transport_header(skb),
12342                                                SKB_CS(skb), fix);
12343
12344                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12345                            pbd->tcp_pseudo_csum);
12346                 }
12347         }
12348
12349         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12350                                  skb_headlen(skb), DMA_TO_DEVICE);
12351
12352         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12353         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12354         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12355         tx_start_bd->nbd = cpu_to_le16(nbd);
12356         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12357         pkt_size = tx_start_bd->nbytes;
12358
12359         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12360            "  nbytes %d  flags %x  vlan %x\n",
12361            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12362            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12363            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12364
12365         if (xmit_type & XMIT_GSO) {
12366
12367                 DP(NETIF_MSG_TX_QUEUED,
12368                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12369                    skb->len, hlen, skb_headlen(skb),
12370                    skb_shinfo(skb)->gso_size);
12371
12372                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12373
12374                 if (unlikely(skb_headlen(skb) > hlen))
12375                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12376                                                  hlen, bd_prod, ++nbd);
12377
12378                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12379                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12380                 pbd->tcp_flags = pbd_tcp_flags(skb);
12381
12382                 if (xmit_type & XMIT_GSO_V4) {
12383                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12384                         pbd->tcp_pseudo_csum =
12385                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12386                                                           ip_hdr(skb)->daddr,
12387                                                           0, IPPROTO_TCP, 0));
12388
12389                 } else
12390                         pbd->tcp_pseudo_csum =
12391                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12392                                                         &ipv6_hdr(skb)->daddr,
12393                                                         0, IPPROTO_TCP, 0));
12394
12395                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12396         }
12397         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12398
12399         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12400                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12401
12402                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12403                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12404                 if (total_pkt_bd == NULL)
12405                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12406
12407                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12408                                        frag->page_offset,
12409                                        frag->size, DMA_TO_DEVICE);
12410
12411                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12412                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12413                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12414                 le16_add_cpu(&pkt_size, frag->size);
12415
12416                 DP(NETIF_MSG_TX_QUEUED,
12417                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12418                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12419                    le16_to_cpu(tx_data_bd->nbytes));
12420         }
12421
12422         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12423
12424         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12425
12426         /* now send a tx doorbell, counting the next BD
12427          * if the packet contains or ends with it
12428          */
12429         if (TX_BD_POFF(bd_prod) < nbd)
12430                 nbd++;
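
        /* (Each page of the BD ring ends in a "next page" pointer BD
         * that NEXT_TX_IDX() skips; if the packet's BDs wrapped past a
         * page boundary, that pointer BD has to be included in nbd so
         * the FW fetch count is right - presumably what the
         * TX_BD_POFF() comparison detects here.)
         */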
12431
12432         if (total_pkt_bd != NULL)
12433                 total_pkt_bd->total_pkt_bytes = pkt_size;
12434
12435         if (pbd)
12436                 DP(NETIF_MSG_TX_QUEUED,
12437                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12438                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12439                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12440                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12441                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12442
12443         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12444
12445         /*
12446          * Make sure that the BD data is updated before updating the producer
12447          * since FW might read the BD right after the producer is updated.
12448          * This is only applicable for weak-ordered memory model archs such
12449          * as IA-64. The following barrier is also mandatory since the FW
12450          * assumes packets must have BDs.
12451          */
12452         wmb();
12453
12454         fp->tx_db.data.prod += nbd;
12455         barrier();
12456         DOORBELL(bp, fp->index, fp->tx_db.raw);
12457
12458         mmiowb();
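
        /* barrier() keeps the compiler from reordering the producer
         * update past the doorbell write; mmiowb() orders the MMIO
         * doorbell with respect to writes that follow a later unlock
         * on another CPU (its usual role).
         */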
12459
12460         fp->tx_bd_prod += nbd;
12461
12462         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12463                 netif_tx_stop_queue(txq);
12464
12465                 /* paired memory barrier is in bnx2x_tx_int(); we have to keep
12466                  * the ordering of set_bit() in netif_tx_stop_queue() and the
12467                  * read of fp->tx_bd_cons */
12468                 smp_mb();
12469
12470                 fp->eth_q_stats.driver_xoff++;
12471                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12472                         netif_tx_wake_queue(txq);
12473         }
12474         fp->tx_pkt++;
12475
12476         return NETDEV_TX_OK;
12477 }
12478
12479 /* called with rtnl_lock */
12480 static int bnx2x_open(struct net_device *dev)
12481 {
12482         struct bnx2x *bp = netdev_priv(dev);
12483
12484         netif_carrier_off(dev);
12485
12486         bnx2x_set_power_state(bp, PCI_D0);
12487
12488         if (!bnx2x_reset_is_done(bp)) {
12489                 do {
12490                         /* Reset the MCP mailbox sequence if there is an
12491                          * ongoing recovery
12492                          */
12493                         bp->fw_seq = 0;
12494
12495                         /* If this is the first function to load and "reset
12496                          * done" is still not cleared, a recovery is likely in
12497                          * progress. We don't check the attention state here;
12498                          * it may already have been cleared by a "common"
12499                          * reset, but we shall proceed with "process kill" anyway.
12500                          */
12501                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12502                                 bnx2x_trylock_hw_lock(bp,
12503                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12504                                 (!bnx2x_leader_reset(bp))) {
12505                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12506                                 break;
12507                         }
12508
12509                         bnx2x_set_power_state(bp, PCI_D3hot);
12510
12511                         printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12512                         " completed yet. Try again later. If you still see this"
12513                         " message after a few retries then a power cycle is"
12514                         " required.\n", bp->dev->name);
12515
12516                         return -EAGAIN;
12517                 } while (0);
12518         }
12519
12520         bp->recovery_state = BNX2X_RECOVERY_DONE;
12521
12522         return bnx2x_nic_load(bp, LOAD_OPEN);
12523 }
12524
12525 /* called with rtnl_lock */
12526 static int bnx2x_close(struct net_device *dev)
12527 {
12528         struct bnx2x *bp = netdev_priv(dev);
12529
12530         /* Unload the driver, release IRQs */
12531         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12532         if (atomic_read(&bp->pdev->enable_cnt) == 1)
12533                 if (!CHIP_REV_IS_SLOW(bp))
12534                         bnx2x_set_power_state(bp, PCI_D3hot);
12535
12536         return 0;
12537 }
12538
12539 /* called with netif_tx_lock from dev_mcast.c */
12540 static void bnx2x_set_rx_mode(struct net_device *dev)
12541 {
12542         struct bnx2x *bp = netdev_priv(dev);
12543         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12544         int port = BP_PORT(bp);
12545
12546         if (bp->state != BNX2X_STATE_OPEN) {
12547                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12548                 return;
12549         }
12550
12551         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12552
12553         if (dev->flags & IFF_PROMISC)
12554                 rx_mode = BNX2X_RX_MODE_PROMISC;
12555
12556         else if ((dev->flags & IFF_ALLMULTI) ||
12557                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12558                   CHIP_IS_E1(bp)))
12559                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12560
12561         else { /* some multicasts */
12562                 if (CHIP_IS_E1(bp)) {
12563                         int i, old, offset;
12564                         struct netdev_hw_addr *ha;
12565                         struct mac_configuration_cmd *config =
12566                                                 bnx2x_sp(bp, mcast_config);
12567
12568                         i = 0;
12569                         netdev_for_each_mc_addr(ha, dev) {
12570                                 config->config_table[i].
12571                                         cam_entry.msb_mac_addr =
12572                                         swab16(*(u16 *)&ha->addr[0]);
12573                                 config->config_table[i].
12574                                         cam_entry.middle_mac_addr =
12575                                         swab16(*(u16 *)&ha->addr[2]);
12576                                 config->config_table[i].
12577                                         cam_entry.lsb_mac_addr =
12578                                         swab16(*(u16 *)&ha->addr[4]);
12579                                 config->config_table[i].cam_entry.flags =
12580                                                         cpu_to_le16(port);
12581                                 config->config_table[i].
12582                                         target_table_entry.flags = 0;
12583                                 config->config_table[i].target_table_entry.
12584                                         clients_bit_vector =
12585                                                 cpu_to_le32(1 << BP_L_ID(bp));
12586                                 config->config_table[i].
12587                                         target_table_entry.vlan_id = 0;
12588
12589                                 DP(NETIF_MSG_IFUP,
12590                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12591                                    config->config_table[i].
12592                                                 cam_entry.msb_mac_addr,
12593                                    config->config_table[i].
12594                                                 cam_entry.middle_mac_addr,
12595                                    config->config_table[i].
12596                                                 cam_entry.lsb_mac_addr);
12597                                 i++;
12598                         }
12599                         old = config->hdr.length;
12600                         if (old > i) {
12601                                 for (; i < old; i++) {
12602                                         if (CAM_IS_INVALID(config->
12603                                                            config_table[i])) {
12604                                                 /* already invalidated */
12605                                                 break;
12606                                         }
12607                                         /* invalidate */
12608                                         CAM_INVALIDATE(config->
12609                                                        config_table[i]);
12610                                 }
12611                         }
12612
12613                         if (CHIP_REV_IS_SLOW(bp))
12614                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12615                         else
12616                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12617
12618                         config->hdr.length = i;
12619                         config->hdr.offset = offset;
12620                         config->hdr.client_id = bp->fp->cl_id;
12621                         config->hdr.reserved1 = 0;
12622
12623                         bp->set_mac_pending++;
12624                         smp_wmb();
12625
12626                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12627                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12628                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12629                                       0);
12630                 } else { /* E1H */
12631                         /* Accept one or more multicasts */
12632                         struct netdev_hw_addr *ha;
12633                         u32 mc_filter[MC_HASH_SIZE];
12634                         u32 crc, bit, regidx;
12635                         int i;
12636
12637                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12638
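                        /* Hash each multicast MAC into a 256-bit filter:
                         * bits 31:24 of crc32c(MAC) select one of 256
                         * filter bits, spread over MC_HASH_SIZE 32-bit
                         * registers. E.g. (crc >> 24) == 154 sets bit
                         * 154 & 0x1f == 26 of mc_filter[154 >> 5 == 4]
                         * (assuming the MC_HASH_SIZE registers cover
                         * bits 0-255).
                         */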
12639                         netdev_for_each_mc_addr(ha, dev) {
12640                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12641                                    ha->addr);
12642
12643                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12644                                 bit = (crc >> 24) & 0xff;
12645                                 regidx = bit >> 5;
12646                                 bit &= 0x1f;
12647                                 mc_filter[regidx] |= (1 << bit);
12648                         }
12649
12650                         for (i = 0; i < MC_HASH_SIZE; i++)
12651                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12652                                        mc_filter[i]);
12653                 }
12654         }
12655
12656         bp->rx_mode = rx_mode;
12657         bnx2x_set_storm_rx_mode(bp);
12658 }
12659
12660 /* called with rtnl_lock */
12661 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12662 {
12663         struct sockaddr *addr = p;
12664         struct bnx2x *bp = netdev_priv(dev);
12665
12666         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12667                 return -EINVAL;
12668
12669         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12670         if (netif_running(dev)) {
12671                 if (CHIP_IS_E1(bp))
12672                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12673                 else
12674                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12675         }
12676
12677         return 0;
12678 }
12679
12680 /* called with rtnl_lock */
12681 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12682                            int devad, u16 addr)
12683 {
12684         struct bnx2x *bp = netdev_priv(netdev);
12685         u16 value;
12686         int rc;
12687         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12688
12689         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12690            prtad, devad, addr);
12691
12692         if (prtad != bp->mdio.prtad) {
12693                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12694                    prtad, bp->mdio.prtad);
12695                 return -EINVAL;
12696         }
12697
12698         /* The HW expects different devad if CL22 is used */
12699         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12700
12701         bnx2x_acquire_phy_lock(bp);
12702         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12703                              devad, addr, &value);
12704         bnx2x_release_phy_lock(bp);
12705         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12706
12707         if (!rc)
12708                 rc = value;
12709         return rc;
12710 }
12711
12712 /* called with rtnl_lock */
12713 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12714                             u16 addr, u16 value)
12715 {
12716         struct bnx2x *bp = netdev_priv(netdev);
12717         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12718         int rc;
12719
12720         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12721                            " value 0x%x\n", prtad, devad, addr, value);
12722
12723         if (prtad != bp->mdio.prtad) {
12724                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12725                    prtad, bp->mdio.prtad);
12726                 return -EINVAL;
12727         }
12728
12729         /* The HW expects different devad if CL22 is used */
12730         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12731
12732         bnx2x_acquire_phy_lock(bp);
12733         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12734                               devad, addr, value);
12735         bnx2x_release_phy_lock(bp);
12736         return rc;
12737 }
12738
12739 /* called with rtnl_lock */
12740 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12741 {
12742         struct bnx2x *bp = netdev_priv(dev);
12743         struct mii_ioctl_data *mdio = if_mii(ifr);
12744
12745         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12746            mdio->phy_id, mdio->reg_num, mdio->val_in);
12747
12748         if (!netif_running(dev))
12749                 return -EAGAIN;
12750
12751         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12752 }
12753
12754 /* called with rtnl_lock */
12755 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12756 {
12757         struct bnx2x *bp = netdev_priv(dev);
12758         int rc = 0;
12759
12760         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12761                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12762                 return -EAGAIN;
12763         }
12764
12765         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12766             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12767                 return -EINVAL;
12768
12769         /* This does not race with packet allocation
12770          * because the actual alloc size is
12771          * only updated as part of load
12772          */
12773         dev->mtu = new_mtu;
12774
12775         if (netif_running(dev)) {
12776                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12777                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12778         }
12779
12780         return rc;
12781 }
12782
12783 static void bnx2x_tx_timeout(struct net_device *dev)
12784 {
12785         struct bnx2x *bp = netdev_priv(dev);
12786
12787 #ifdef BNX2X_STOP_ON_ERROR
12788         if (!bp->panic)
12789                 bnx2x_panic();
12790 #endif
12791         /* This allows the netif to be shut down gracefully before resetting */
12792         schedule_delayed_work(&bp->reset_task, 0);
12793 }
12794
12795 #ifdef BCM_VLAN
12796 /* called with rtnl_lock */
12797 static void bnx2x_vlan_rx_register(struct net_device *dev,
12798                                    struct vlan_group *vlgrp)
12799 {
12800         struct bnx2x *bp = netdev_priv(dev);
12801
12802         bp->vlgrp = vlgrp;
12803
12804         /* Set flags according to the required capabilities */
12805         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12806
12807         if (dev->features & NETIF_F_HW_VLAN_TX)
12808                 bp->flags |= HW_VLAN_TX_FLAG;
12809
12810         if (dev->features & NETIF_F_HW_VLAN_RX)
12811                 bp->flags |= HW_VLAN_RX_FLAG;
12812
12813         if (netif_running(dev))
12814                 bnx2x_set_client_config(bp);
12815 }
12816
12817 #endif
12818
12819 #ifdef CONFIG_NET_POLL_CONTROLLER
12820 static void poll_bnx2x(struct net_device *dev)
12821 {
12822         struct bnx2x *bp = netdev_priv(dev);
12823
12824         disable_irq(bp->pdev->irq);
12825         bnx2x_interrupt(bp->pdev->irq, dev);
12826         enable_irq(bp->pdev->irq);
12827 }
12828 #endif
12829
12830 static const struct net_device_ops bnx2x_netdev_ops = {
12831         .ndo_open               = bnx2x_open,
12832         .ndo_stop               = bnx2x_close,
12833         .ndo_start_xmit         = bnx2x_start_xmit,
12834         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12835         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12836         .ndo_validate_addr      = eth_validate_addr,
12837         .ndo_do_ioctl           = bnx2x_ioctl,
12838         .ndo_change_mtu         = bnx2x_change_mtu,
12839         .ndo_tx_timeout         = bnx2x_tx_timeout,
12840 #ifdef BCM_VLAN
12841         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12842 #endif
12843 #ifdef CONFIG_NET_POLL_CONTROLLER
12844         .ndo_poll_controller    = poll_bnx2x,
12845 #endif
12846 };
12847
12848 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12849                                     struct net_device *dev)
12850 {
12851         struct bnx2x *bp;
12852         int rc;
12853
12854         SET_NETDEV_DEV(dev, &pdev->dev);
12855         bp = netdev_priv(dev);
12856
12857         bp->dev = dev;
12858         bp->pdev = pdev;
12859         bp->flags = 0;
12860         bp->func = PCI_FUNC(pdev->devfn);
12861
12862         rc = pci_enable_device(pdev);
12863         if (rc) {
12864                 dev_err(&bp->pdev->dev,
12865                         "Cannot enable PCI device, aborting\n");
12866                 goto err_out;
12867         }
12868
12869         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12870                 dev_err(&bp->pdev->dev,
12871                         "Cannot find PCI device base address, aborting\n");
12872                 rc = -ENODEV;
12873                 goto err_out_disable;
12874         }
12875
12876         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12877                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12878                        " base address, aborting\n");
12879                 rc = -ENODEV;
12880                 goto err_out_disable;
12881         }
12882
12883         if (atomic_read(&pdev->enable_cnt) == 1) {
12884                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12885                 if (rc) {
12886                         dev_err(&bp->pdev->dev,
12887                                 "Cannot obtain PCI resources, aborting\n");
12888                         goto err_out_disable;
12889                 }
12890
12891                 pci_set_master(pdev);
12892                 pci_save_state(pdev);
12893         }
12894
12895         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12896         if (bp->pm_cap == 0) {
12897                 dev_err(&bp->pdev->dev,
12898                         "Cannot find power management capability, aborting\n");
12899                 rc = -EIO;
12900                 goto err_out_release;
12901         }
12902
12903         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12904         if (bp->pcie_cap == 0) {
12905                 dev_err(&bp->pdev->dev,
12906                         "Cannot find PCI Express capability, aborting\n");
12907                 rc = -EIO;
12908                 goto err_out_release;
12909         }
12910
12911         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12912                 bp->flags |= USING_DAC_FLAG;
12913                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12914                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12915                                " failed, aborting\n");
12916                         rc = -EIO;
12917                         goto err_out_release;
12918                 }
12919
12920         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12921                 dev_err(&bp->pdev->dev,
12922                         "System does not support DMA, aborting\n");
12923                 rc = -EIO;
12924                 goto err_out_release;
12925         }
12926
12927         dev->mem_start = pci_resource_start(pdev, 0);
12928         dev->base_addr = dev->mem_start;
12929         dev->mem_end = pci_resource_end(pdev, 0);
12930
12931         dev->irq = pdev->irq;
12932
12933         bp->regview = pci_ioremap_bar(pdev, 0);
12934         if (!bp->regview) {
12935                 dev_err(&bp->pdev->dev,
12936                         "Cannot map register space, aborting\n");
12937                 rc = -ENOMEM;
12938                 goto err_out_release;
12939         }
12940
12941         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12942                                         min_t(u64, BNX2X_DB_SIZE,
12943                                               pci_resource_len(pdev, 2)));
12944         if (!bp->doorbells) {
12945                 dev_err(&bp->pdev->dev,
12946                         "Cannot map doorbell space, aborting\n");
12947                 rc = -ENOMEM;
12948                 goto err_out_unmap;
12949         }
12950
12951         bnx2x_set_power_state(bp, PCI_D0);
12952
12953         /* clean indirect addresses */
12954         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12955                                PCICFG_VENDOR_ID_OFFSET);
12956         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12957         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12958         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12959         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12960
12961         /* Reset the load counter */
12962         bnx2x_clear_load_cnt(bp);
12963
12964         dev->watchdog_timeo = TX_TIMEOUT;
12965
12966         dev->netdev_ops = &bnx2x_netdev_ops;
12967         dev->ethtool_ops = &bnx2x_ethtool_ops;
12968         dev->features |= NETIF_F_SG;
12969         dev->features |= NETIF_F_HW_CSUM;
12970         if (bp->flags & USING_DAC_FLAG)
12971                 dev->features |= NETIF_F_HIGHDMA;
12972         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12973         dev->features |= NETIF_F_TSO6;
12974 #ifdef BCM_VLAN
12975         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
12976         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12977
12978         dev->vlan_features |= NETIF_F_SG;
12979         dev->vlan_features |= NETIF_F_HW_CSUM;
12980         if (bp->flags & USING_DAC_FLAG)
12981                 dev->vlan_features |= NETIF_F_HIGHDMA;
12982         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12983         dev->vlan_features |= NETIF_F_TSO6;
12984 #endif
12985
12986         /* get_port_hwinfo() will set prtad and mmds properly */
12987         bp->mdio.prtad = MDIO_PRTAD_NONE;
12988         bp->mdio.mmds = 0;
12989         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12990         bp->mdio.dev = dev;
12991         bp->mdio.mdio_read = bnx2x_mdio_read;
12992         bp->mdio.mdio_write = bnx2x_mdio_write;
12993
12994         return 0;
12995
12996 err_out_unmap:
12997         if (bp->regview) {
12998                 iounmap(bp->regview);
12999                 bp->regview = NULL;
13000         }
13001         if (bp->doorbells) {
13002                 iounmap(bp->doorbells);
13003                 bp->doorbells = NULL;
13004         }
13005
13006 err_out_release:
13007         if (atomic_read(&pdev->enable_cnt) == 1)
13008                 pci_release_regions(pdev);
13009
13010 err_out_disable:
13011         pci_disable_device(pdev);
13012         pci_set_drvdata(pdev, NULL);
13013
13014 err_out:
13015         return rc;
13016 }
13017
13018 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13019                                                  int *width, int *speed)
13020 {
13021         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13022
13023         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13024
13025         /* returned value: 1 = 2.5GHz, 2 = 5GHz */
13026         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13027 }
13028
13029 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13030 {
13031         const struct firmware *firmware = bp->firmware;
13032         struct bnx2x_fw_file_hdr *fw_hdr;
13033         struct bnx2x_fw_file_section *sections;
13034         u32 offset, len, num_ops;
13035         u16 *ops_offsets;
13036         int i;
13037         const u8 *fw_ver;
13038
13039         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13040                 return -EINVAL;
13041
13042         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13043         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13044
13045         /* Make sure none of the offsets and sizes make us read beyond
13046          * the end of the firmware data */
13047         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13048                 offset = be32_to_cpu(sections[i].offset);
13049                 len = be32_to_cpu(sections[i].len);
13050                 if (offset + len > firmware->size) {
13051                         dev_err(&bp->pdev->dev,
13052                                 "Section %d length is out of bounds\n", i);
13053                         return -EINVAL;
13054                 }
13055         }
13056
13057         /* Likewise for the init_ops offsets */
13058         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13059         ops_offsets = (u16 *)(firmware->data + offset);
13060         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13061
13062         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13063                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13064                         dev_err(&bp->pdev->dev,
13065                                 "Section offset %d is out of bounds\n", i);
13066                         return -EINVAL;
13067                 }
13068         }
13069
13070         /* Check FW version */
13071         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13072         fw_ver = firmware->data + offset;
13073         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13074             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13075             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13076             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13077                 dev_err(&bp->pdev->dev,
13078                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13079                        fw_ver[0], fw_ver[1], fw_ver[2],
13080                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13081                        BCM_5710_FW_MINOR_VERSION,
13082                        BCM_5710_FW_REVISION_VERSION,
13083                        BCM_5710_FW_ENGINEERING_VERSION);
13084                 return -EINVAL;
13085         }
13086
13087         return 0;
13088 }
13089
13090 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13091 {
13092         const __be32 *source = (const __be32 *)_source;
13093         u32 *target = (u32 *)_target;
13094         u32 i;
13095
13096         for (i = 0; i < n/4; i++)
13097                 target[i] = be32_to_cpu(source[i]);
13098 }
13099
13100 /*
13101    Ops array is stored in the following format:
13102    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13103  */
13104 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13105 {
13106         const __be32 *source = (const __be32 *)_source;
13107         struct raw_op *target = (struct raw_op *)_target;
13108         u32 i, j, tmp;
13109
13110         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13111                 tmp = be32_to_cpu(source[j]);
13112                 target[i].op = (tmp >> 24) & 0xff;
13113                 target[i].offset = tmp & 0xffffff;
13114                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13115         }
13116 }
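
/* For example, the big-endian pair { 0x1A2B3C4D, 0x11223344 } decodes
 * to op = 0x1A, offset = 0x2B3C4D, raw_data = 0x11223344.
 */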
13117
13118 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13119 {
13120         const __be16 *source = (const __be16 *)_source;
13121         u16 *target = (u16 *)_target;
13122         u32 i;
13123
13124         for (i = 0; i < n/2; i++)
13125                 target[i] = be16_to_cpu(source[i]);
13126 }
13127
13128 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13129 do {                                                                    \
13130         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13131         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13132         if (!bp->arr) {                                                 \
13133                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13134                 goto lbl;                                               \
13135         }                                                               \
13136         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13137              (u8 *)bp->arr, len);                                       \
13138 } while (0)
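
/* Used in bnx2x_init_firmware() below, e.g.
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 * allocates bp->init_data, copies the section out of the firmware image
 * while byte-swapping it, and jumps to the given label on allocation
 * failure.
 */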
13139
13140 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13141 {
13142         const char *fw_file_name;
13143         struct bnx2x_fw_file_hdr *fw_hdr;
13144         int rc;
13145
13146         if (CHIP_IS_E1(bp))
13147                 fw_file_name = FW_FILE_NAME_E1;
13148         else if (CHIP_IS_E1H(bp))
13149                 fw_file_name = FW_FILE_NAME_E1H;
13150         else {
13151                 dev_err(dev, "Unsupported chip revision\n");
13152                 return -EINVAL;
13153         }
13154
13155         dev_info(dev, "Loading %s\n", fw_file_name);
13156
13157         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13158         if (rc) {
13159                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13160                 goto request_firmware_exit;
13161         }
13162
13163         rc = bnx2x_check_firmware(bp);
13164         if (rc) {
13165                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13166                 goto request_firmware_exit;
13167         }
13168
13169         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13170
13171         /* Initialize the pointers to the init arrays */
13172         /* Blob */
13173         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13174
13175         /* Opcodes */
13176         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13177
13178         /* Offsets */
13179         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13180                             be16_to_cpu_n);
13181
13182         /* STORMs firmware */
13183         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13184                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13185         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13186                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13187         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13188                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13189         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13190                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13191         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13192                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13193         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13194                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13195         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13196                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13197         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13198                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13199
13200         return 0;
13201
13202 init_offsets_alloc_err:
13203         kfree(bp->init_ops);
13204 init_ops_alloc_err:
13205         kfree(bp->init_data);
13206 request_firmware_exit:
13207         release_firmware(bp->firmware);
13208
13209         return rc;
13210 }
13211
13212
13213 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13214                                     const struct pci_device_id *ent)
13215 {
13216         struct net_device *dev = NULL;
13217         struct bnx2x *bp;
13218         int pcie_width, pcie_speed;
13219         int rc;
13220
13221         /* dev zeroed in init_etherdev */
13222         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13223         if (!dev) {
13224                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13225                 return -ENOMEM;
13226         }
13227
13228         bp = netdev_priv(dev);
13229         bp->msg_enable = debug;
13230
13231         pci_set_drvdata(pdev, dev);
13232
13233         rc = bnx2x_init_dev(pdev, dev);
13234         if (rc < 0) {
13235                 free_netdev(dev);
13236                 return rc;
13237         }
13238
13239         rc = bnx2x_init_bp(bp);
13240         if (rc)
13241                 goto init_one_exit;
13242
13243         /* Set init arrays */
13244         rc = bnx2x_init_firmware(bp, &pdev->dev);
13245         if (rc) {
13246                 dev_err(&pdev->dev, "Error loading firmware\n");
13247                 goto init_one_exit;
13248         }
13249
13250         rc = register_netdev(dev);
13251         if (rc) {
13252                 dev_err(&pdev->dev, "Cannot register net device\n");
13253                 goto init_one_exit;
13254         }
13255
13256         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13257         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13258                " IRQ %d, ", board_info[ent->driver_data].name,
13259                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13260                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13261                dev->base_addr, bp->pdev->irq);
13262         pr_cont("node addr %pM\n", dev->dev_addr);
13263
13264         return 0;
13265
13266 init_one_exit:
13267         if (bp->regview)
13268                 iounmap(bp->regview);
13269
13270         if (bp->doorbells)
13271                 iounmap(bp->doorbells);
13272
13273         free_netdev(dev);
13274
13275         if (atomic_read(&pdev->enable_cnt) == 1)
13276                 pci_release_regions(pdev);
13277
13278         pci_disable_device(pdev);
13279         pci_set_drvdata(pdev, NULL);
13280
13281         return rc;
13282 }
13283
13284 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13285 {
13286         struct net_device *dev = pci_get_drvdata(pdev);
13287         struct bnx2x *bp;
13288
13289         if (!dev) {
13290                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13291                 return;
13292         }
13293         bp = netdev_priv(dev);
13294
13295         unregister_netdev(dev);
13296
13297         /* Make sure RESET task is not scheduled before continuing */
13298         cancel_delayed_work_sync(&bp->reset_task);
13299
13300         kfree(bp->init_ops_offsets);
13301         kfree(bp->init_ops);
13302         kfree(bp->init_data);
13303         release_firmware(bp->firmware);
13304
13305         if (bp->regview)
13306                 iounmap(bp->regview);
13307
13308         if (bp->doorbells)
13309                 iounmap(bp->doorbells);
13310
13311         free_netdev(dev);
13312
13313         if (atomic_read(&pdev->enable_cnt) == 1)
13314                 pci_release_regions(pdev);
13315
13316         pci_disable_device(pdev);
13317         pci_set_drvdata(pdev, NULL);
13318 }
13319
13320 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13321 {
13322         struct net_device *dev = pci_get_drvdata(pdev);
13323         struct bnx2x *bp;
13324
13325         if (!dev) {
13326                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13327                 return -ENODEV;
13328         }
13329         bp = netdev_priv(dev);
13330
13331         rtnl_lock();
13332
13333         pci_save_state(pdev);
13334
13335         if (!netif_running(dev)) {
13336                 rtnl_unlock();
13337                 return 0;
13338         }
13339
13340         netif_device_detach(dev);
13341
13342         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13343
13344         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13345
13346         rtnl_unlock();
13347
13348         return 0;
13349 }
13350
13351 static int bnx2x_resume(struct pci_dev *pdev)
13352 {
13353         struct net_device *dev = pci_get_drvdata(pdev);
13354         struct bnx2x *bp;
13355         int rc;
13356
13357         if (!dev) {
13358                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13359                 return -ENODEV;
13360         }
13361         bp = netdev_priv(dev);
13362
13363         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13364                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13365                 return -EAGAIN;
13366         }
13367
13368         rtnl_lock();
13369
13370         pci_restore_state(pdev);
13371
13372         if (!netif_running(dev)) {
13373                 rtnl_unlock();
13374                 return 0;
13375         }
13376
13377         bnx2x_set_power_state(bp, PCI_D0);
13378         netif_device_attach(dev);
13379
13380         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13381
13382         rtnl_unlock();
13383
13384         return rc;
13385 }
13386
13387 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13388 {
13389         int i;
13390
13391         bp->state = BNX2X_STATE_ERROR;
13392
13393         bp->rx_mode = BNX2X_RX_MODE_NONE;
13394
13395         bnx2x_netif_stop(bp, 0);
13396
13397         del_timer_sync(&bp->timer);
13398         bp->stats_state = STATS_STATE_DISABLED;
13399         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13400
13401         /* Release IRQs */
13402         bnx2x_free_irq(bp, false);
13403
13404         if (CHIP_IS_E1(bp)) {
13405                 struct mac_configuration_cmd *config =
13406                                                 bnx2x_sp(bp, mcast_config);
13407
13408                 for (i = 0; i < config->hdr.length; i++)
13409                         CAM_INVALIDATE(config->config_table[i]);
13410         }
13411
13412         /* Free SKBs, SGEs, TPA pool and driver internals */
13413         bnx2x_free_skbs(bp);
13414         for_each_queue(bp, i)
13415                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13416         for_each_queue(bp, i)
13417                 netif_napi_del(&bnx2x_fp(bp, i, napi));
13418         bnx2x_free_mem(bp);
13419
13420         bp->state = BNX2X_STATE_CLOSED;
13421
13422         netif_carrier_off(bp->dev);
13423
13424         return 0;
13425 }
13426
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                pr_err("Handling parity error recovery. Try again later\n");
                return;
        }

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

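/* The slowpath workqueue is created before the PCI driver is
 * registered so that work scheduled from probe onwards always has a
 * queue to run on; registration failure unwinds it again.
 */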
static int __init bnx2x_init(void)
{
        int ret;

        pr_info("%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                pr_err("Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                pr_err("Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen; drain as
 * many queued CNIC kwqes onto the SPQ as the freed budget allows.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}

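/**
 * bnx2x_cnic_sp_queue - stage CNIC 16-byte kwqes for slowpath posting
 * @dev: net device
 * @kwqes: array of kwqes to queue
 * @count: number of entries in @kwqes
 *
 * Copies the kwqes into the driver's staging ring under spq_lock and
 * kicks bnx2x_cnic_sp_post() if the SPQ has room. Returns the number
 * of kwqes actually accepted, which may be less than @count when the
 * staging ring fills up.
 */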
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

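/* Deliver a control event to the CNIC driver under cnic_mutex; safe
 * against concurrent register/unregister but must not be called from
 * atomic context.
 */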
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

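/* Bottom-half-safe variant of bnx2x_cnic_ctl_send(): samples the
 * cnic_ops pointer under rcu_read_lock() instead of taking the mutex.
 */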
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

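/* Notify CNIC that the CFC delete for @cid has completed, then account
 * for it on the slowpath queue.
 */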
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

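/**
 * bnx2x_drv_ctl - control hook exported to the CNIC driver
 * @dev: net device
 * @ctl: command and payload
 *
 * Dispatches CNIC requests: ILT/context-table writes, slowpath
 * completion accounting and starting/stopping L2 rings (the ring
 * commands rely on the caller holding rtnl_lock).
 */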
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

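/* Describe the interrupt resources CNIC may use: vector 1 of the MSI-X
 * table when MSI-X is active, plus the CNIC and default status blocks.
 */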
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

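/**
 * bnx2x_register_cnic - attach the CNIC driver to this device
 * @dev: net device
 * @ops: CNIC callbacks
 * @data: opaque CNIC context passed back through the callbacks
 *
 * Allocates a one-page kwqe staging ring, initializes the CNIC status
 * block, programs the iSCSI MAC and only then publishes @ops with
 * rcu_assign_pointer() so readers see fully initialized state.
 */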
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

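/* Detach CNIC: clear the iSCSI MAC, NULL the ops pointer under the
 * mutex and wait out RCU readers before freeing the kwqe ring.
 */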
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

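/* Fill in and return the cnic_eth_dev block that tells the CNIC module
 * how to drive this device (register views, context-table geometry and
 * the callbacks defined above).
 */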
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */
