/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

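/* The DMAE block provides 16 independent command channels; a transfer
 * is started by loading a dmae_command into the command memory and then
 * writing 1 to the channel's "go" register.
 */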
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

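/* DMA a buffer from host memory to a GRC (chip register space) address.
 * Falls back to slow indirect writes while the DMAE engine is not yet
 * ready; otherwise posts a command and polls wb_comp for completion
 * under dmae_mutex.
 */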
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

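/* Counterpart of bnx2x_write_dmae(): DMA len32 dwords from GRC space
 * into the slowpath wb_data buffer, using indirect register reads while
 * the DMAE engine is not ready.
 */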
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

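/* A single DMAE command is limited to DMAE_LEN32_WR_MAX dwords, so
 * split larger writes into maximal-size chunks.
 */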
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

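/* Scan the assert lists of the four storm processors (X/T/C/U) and log
 * every valid entry; returns the number of asserts found.
 */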
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

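/* Dump the firmware trace buffer from the MCP scratchpad to the log */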
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

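/* Log a full crash dump: driver indices, the Rx/Tx rings around the
 * current consumers, the FW trace and any storm asserts.
 */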
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

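/* Configure the HC block to generate interrupts in the mode currently
 * in use for this port: MSI-X, MSI or INTx.
 */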
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

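/* Quiesce the interrupt path: raise intr_sem so handlers bail out,
 * optionally mask interrupts in HW, then wait for in-flight ISRs and
 * the slowpath task to complete.
 */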
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

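/* Acknowledge a status block to the IGU: report the new consumer index
 * for the given storm and request the specified interrupt mode.
 */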
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

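/* Number of Tx BDs still available on this queue; the "next page"
 * entries are reserved and therefore counted as used.
 */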
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

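/* Tx completion: free all packets the chip reports as sent and wake the
 * netdev queue if it was stopped and enough room has been freed.
 */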
static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

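/* A slowpath (ramrod) completion arrived on the RCQ; advance the
 * matching fastpath or global state machine accordingly.
 */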
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

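/* Rx SGE ring helpers: manage the pages that back the scatter-gather
 * entries used for TPA (LRO) aggregation.
 */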
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

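/* SGE mask bookkeeping: track which SGE pages the FW has consumed so
 * the SGE producer can be advanced safely.
 */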
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

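/* TPA start: park the skb holding the aggregation header in the
 * per-queue TPA pool and put a fresh mapped skb on the producer BD.
 */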
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

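/* TPA stop: aggregation is complete - fix up the IP header checksum,
 * attach the SGE pages as frags and hand the packet to GRO.
 */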
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

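/* Publish new BD/CQE/SGE producer values to the USTORM memory so the
 * FW can continue placing packets.
 */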
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

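/* NAPI Rx handler: walk the RCQ, dispatching slowpath events and TPA
 * start/stop completions, and pass regular packets up the stack;
 * returns the number of packets processed, bounded by the budget.
 */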
a2fbb9ea
ET
1540static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1541{
1542 struct bnx2x *bp = fp->bp;
34f80b04 1543 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1544 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1545 int rx_pkt = 0;
1546
1547#ifdef BNX2X_STOP_ON_ERROR
1548 if (unlikely(bp->panic))
1549 return 0;
1550#endif
1551
34f80b04
EG
1552 /* CQ "next element" is of the size of the regular element,
1553 that's why it's ok here */
a2fbb9ea
ET
1554 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1555 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1556 hw_comp_cons++;
1557
1558 bd_cons = fp->rx_bd_cons;
1559 bd_prod = fp->rx_bd_prod;
34f80b04 1560 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1561 sw_comp_cons = fp->rx_comp_cons;
1562 sw_comp_prod = fp->rx_comp_prod;
1563
1564 /* Memory barrier necessary as speculative reads of the rx
1565 * buffer can be ahead of the index in the status block
1566 */
1567 rmb();
1568
1569 DP(NETIF_MSG_RX_STATUS,
1570 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1571 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1572
1573 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1574 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1575 struct sk_buff *skb;
1576 union eth_rx_cqe *cqe;
34f80b04
EG
1577 u8 cqe_fp_flags;
1578 u16 len, pad;
a2fbb9ea
ET
1579
1580 comp_ring_cons = RCQ_BD(sw_comp_cons);
1581 bd_prod = RX_BD(bd_prod);
1582 bd_cons = RX_BD(bd_cons);
1583
619e7a66
EG
1584 /* Prefetch the page containing the BD descriptor
1585 at producer's index. It will be needed when new skb is
1586 allocated */
1587 prefetch((void *)(PAGE_ALIGN((unsigned long)
1588 (&fp->rx_desc_ring[bd_prod])) -
1589 PAGE_SIZE + 1));
1590
a2fbb9ea 1591 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1592 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1593
		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
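
/*
 * Note on the loop above (an explanatory aside, not driver logic): the
 * function returns the number of packets actually processed, and bails
 * out of the while loop as soon as rx_pkt reaches the caller's budget.
 * Under the usual NAPI contract, a return value equal to the budget
 * signals that more work may be pending, so the poll routine is invoked
 * again rather than re-enabling the interrupt.
 */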

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->u_status_block.status_block_index);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
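
/*
 * Illustrative decoding of the INTA status word handled above (a sketch,
 * not additional driver code): bit 0 belongs to the slowpath/default
 * status block (hence the "status & 0x1" test that queues sp_task), and
 * each fastpath status block sb_id owns bit (sb_id + 1), which is what
 * "mask = 0x2 << fp->sb_id" computes.  For example, status = 0x5 means
 * slowpath work pending (bit 0) plus fastpath SB 1 (0x2 << 1 = 0x4).
 */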

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
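
/*
 * Typical acquire/release pairing for the two helpers above -- an
 * illustrative sketch only; HW_LOCK_RESOURCE_SPIO is one real user,
 * see bnx2x_set_spio() further down in this file:
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO))
 *		return;		(lock was not granted within the 5s window)
 *	... program the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */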

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
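
/*
 * The mapping implemented by the switch above, summarized as a table
 * (a restatement for clarity, not additional logic):
 *
 *	negotiated ieee_fc	ADVERTISED_Pause	ADVERTISED_Asym_Pause
 *	PAUSE_NONE		cleared			cleared
 *	PAUSE_BOTH		set			set
 *	PAUSE_ASYMMETRIC	unchanged		set
 *	anything else		cleared			cleared
 */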

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bp->link_vars.line_speed;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
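
/*
 * Worked example of the arithmetic above for a 10G link (illustrative
 * only; it assumes RS_PERIODIC_TIMEOUT_USEC = 100 as the "100 usec"
 * comment implies, and derives T_FAIR_COEF = 10^7 from the "1000usec at
 * 10G" comment, since t_fair = T_FAIR_COEF / 10000 = 1000):
 *
 *	r_param             = 10000 / 8 = 1250	(bytes per usec)
 *	rs_periodic_timeout = 100 / 4 = 25	(SDM ticks of 4 usec)
 *	rs_threshold        = (100 * 1250 * 5) / 4 = 156250
 *					(1.25 x the bytes sent per period)
 *	t_fair              = 10^7 / 10000 = 1000	(usec)
 */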

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
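
/*
 * Worked example for the quota computed above (illustrative, under the
 * same RS_PERIODIC_TIMEOUT_USEC = 100 assumption as before): a VN capped
 * at vn_max_rate = 2500 Mbps gets
 *
 *	quota = (2500 * 100) / 8 = 31250 bytes
 *
 * per rate-shaping period, i.e. 2500 Mbits/s expressed over a 100 usec
 * window and converted from bits to bytes.
 */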

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
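
/*
 * The sequence-number handshake above, spelled out (a restatement of
 * the protocol, not additional driver code): each request carries
 * (command | seq) in drv_mb_header, and the MCP echoes seq in the low
 * bits of fw_mb_header once it has completed the request.  E.g. with
 * seq = 0x42, the loop keeps re-reading fw_mb_header until
 * (rc & FW_MSG_SEQ_NUMBER_MASK) == 0x42, then strips the sequence via
 * FW_MSG_CODE_MASK and returns only the response code.
 */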

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Only the Tx queues should be re-enabled here */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}
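
/*
 * Ring-wrap behaviour of bnx2x_sp_get_next(), illustrated: the function
 * hands out the current producer BD and only then advances.  If the
 * producer already sat on spq_last_bd, the next producer wraps to the
 * first BD of the page and spq_prod_idx restarts from 0, so the caller
 * always receives the BD from *before* the wrap is applied.
 */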

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs the port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
	       "Please contact Dell Support for assistance.\n");
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);


#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val |= (1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
	return (val & RESET_DONE_FLAG_MASK) ? false : true;
}

/*
 * should be run under rtnl lock
 */
static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	mmiowb();

	return val1;
}

/*
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}

static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
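
/*
 * Layout of BNX2X_MISC_GEN_REG as used by the helpers above:
 *
 *	bits 15..0 - per-chip load counter (LOAD_COUNTER_MASK)
 *	bit  16    - "reset in progress" flag (RESET_DONE_FLAG_SHIFT)
 *
 * Note that RESET_DONE_FLAG_MASK covers all of bits 31..16, so
 * bnx2x_reset_is_done() reports true only when that whole field is
 * clear, not just bit 16.
 */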
3230
3231static inline void _print_next_block(int idx, const char *blk)
3232{
3233 if (idx)
3234 pr_cont(", ");
3235 pr_cont("%s", blk);
3236}
3237
3238static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3239{
3240 int i = 0;
3241 u32 cur_bit = 0;
3242 for (i = 0; sig; i++) {
3243 cur_bit = ((u32)0x1 << i);
3244 if (sig & cur_bit) {
3245 switch (cur_bit) {
3246 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3247 _print_next_block(par_num++, "BRB");
3248 break;
3249 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3250 _print_next_block(par_num++, "PARSER");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3253 _print_next_block(par_num++, "TSDM");
3254 break;
3255 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3256 _print_next_block(par_num++, "SEARCHER");
3257 break;
3258 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3259 _print_next_block(par_num++, "TSEMI");
3260 break;
3261 }
3262
3263 /* Clear the bit */
3264 sig &= ~cur_bit;
3265 }
3266 }
3267
3268 return par_num;
3269}
3270
3271static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3272{
3273 int i = 0;
3274 u32 cur_bit = 0;
3275 for (i = 0; sig; i++) {
3276 cur_bit = ((u32)0x1 << i);
3277 if (sig & cur_bit) {
3278 switch (cur_bit) {
3279 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3280 _print_next_block(par_num++, "PBCLIENT");
3281 break;
3282 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3283 _print_next_block(par_num++, "QM");
3284 break;
3285 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3286 _print_next_block(par_num++, "XSDM");
3287 break;
3288 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3289 _print_next_block(par_num++, "XSEMI");
3290 break;
3291 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3292 _print_next_block(par_num++, "DOORBELLQ");
3293 break;
3294 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3295 _print_next_block(par_num++, "VAUX PCI CORE");
3296 break;
3297 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3298 _print_next_block(par_num++, "DEBUG");
3299 break;
3300 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3301 _print_next_block(par_num++, "USDM");
3302 break;
3303 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3304 _print_next_block(par_num++, "USEMI");
3305 break;
3306 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3307 _print_next_block(par_num++, "UPB");
3308 break;
3309 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3310 _print_next_block(par_num++, "CSDM");
3311 break;
3312 }
3313
3314 /* Clear the bit */
3315 sig &= ~cur_bit;
3316 }
3317 }
3318
3319 return par_num;
3320}
3321
3322static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3323{
3324 int i = 0;
3325 u32 cur_bit = 0;
3326 for (i = 0; sig; i++) {
3327 cur_bit = ((u32)0x1 << i);
3328 if (sig & cur_bit) {
3329 switch (cur_bit) {
3330 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3331 _print_next_block(par_num++, "CSEMI");
3332 break;
3333 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3334 _print_next_block(par_num++, "PXP");
3335 break;
3336 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3337 _print_next_block(par_num++,
3338 "PXPPCICLOCKCLIENT");
3339 break;
3340 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3341 _print_next_block(par_num++, "CFC");
3342 break;
3343 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3344 _print_next_block(par_num++, "CDU");
3345 break;
3346 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3347 _print_next_block(par_num++, "IGU");
3348 break;
3349 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3350 _print_next_block(par_num++, "MISC");
3351 break;
3352 }
3353
3354 /* Clear the bit */
3355 sig &= ~cur_bit;
3356 }
3357 }
3358
3359 return par_num;
3360}
3361
3362static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3363{
3364 int i = 0;
3365 u32 cur_bit = 0;
3366 for (i = 0; sig; i++) {
3367 cur_bit = ((u32)0x1 << i);
3368 if (sig & cur_bit) {
3369 switch (cur_bit) {
3370 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3371 _print_next_block(par_num++, "MCP ROM");
3372 break;
3373 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3374 _print_next_block(par_num++, "MCP UMP RX");
3375 break;
3376 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3377 _print_next_block(par_num++, "MCP UMP TX");
3378 break;
3379 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3380 _print_next_block(par_num++, "MCP SCPAD");
3381 break;
3382 }
3383
3384 /* Clear the bit */
3385 sig &= ~cur_bit;
3386 }
3387 }
3388
3389 return par_num;
3390}
3391
3392static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3393 u32 sig2, u32 sig3)
3394{
3395 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3396 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3397 int par_num = 0;
3398 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3399 "[0]:0x%08x [1]:0x%08x "
3400 "[2]:0x%08x [3]:0x%08x\n",
3401 sig0 & HW_PRTY_ASSERT_SET_0,
3402 sig1 & HW_PRTY_ASSERT_SET_1,
3403 sig2 & HW_PRTY_ASSERT_SET_2,
3404 sig3 & HW_PRTY_ASSERT_SET_3);
3405 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3406 bp->dev->name);
3407 par_num = bnx2x_print_blocks_with_parity0(
3408 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3409 par_num = bnx2x_print_blocks_with_parity1(
3410 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3411 par_num = bnx2x_print_blocks_with_parity2(
3412 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3413 par_num = bnx2x_print_blocks_with_parity3(
3414 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3415 printk("\n");
3416 return true;
3417 } else
3418 return false;
3419}
3420
3421static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3422{
a2fbb9ea 3423 struct attn_route attn;
72fd0718
VZ
3424 int port = BP_PORT(bp);
3425
3426 attn.sig[0] = REG_RD(bp,
3427 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3428 port*4);
3429 attn.sig[1] = REG_RD(bp,
3430 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3431 port*4);
3432 attn.sig[2] = REG_RD(bp,
3433 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3434 port*4);
3435 attn.sig[3] = REG_RD(bp,
3436 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3437 port*4);
3438
3439 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3440 attn.sig[3]);
3441}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
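
/* Illustrative note (not part of the driver): a bit counts as newly asserted
 * when it reads 1 in attn_bits but 0 in both attn_ack and attn_state, and as
 * newly deasserted in the mirrored case.  E.g. attn_bits = 0x5, attn_ack =
 * 0x4, attn_state = 0x4 yields asserted = 0x1 and deasserted = 0x0: bit 0
 * just rose; once it has been acked and later drops, the same masks report
 * it in deasserted instead.
 */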

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
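
/* Illustrative sketch (not part of the driver): "(s_lo < a_lo)" is the usual
 * unsigned-overflow test.  u32 addition wraps mod 2^32, so the sum is smaller
 * than an addend exactly when a carry occurred: with s_lo = 0xfffffff0 and
 * a_lo = 0x20, s_lo wraps to 0x10, 0x10 < 0x20 holds, and the lost 2^32 is
 * re-added as +1 to s_hi.
 */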

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
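
/* Illustrative sketch (not part of the driver): the underflow branch borrows
 * one unit of 2^32 from the high word.  m = 0x2:0x00000005 minus
 * s = 0x1:0x00000007 takes the m_lo < s_lo path: d_hi = 2 - 1 = 1, the borrow
 * drops it to 0, and d_lo = 5 + (UINT_MAX - 7) + 1 = 0xfffffffe, the correct
 * 64-bit difference.  Differences that would go negative are clamped to 0:0
 * instead of being allowed to wrap.
 */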

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)
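
/* Illustrative note (not part of the driver): UPDATE_STAT64 keeps two copies
 * of every MAC counter.  mac_stx[0] is a raw snapshot of the previous
 * hardware readout, so DIFF_64 recovers the per-interval delta from the
 * free-running hardware counter; the delta is then folded into mac_stx[1],
 * the running total that survives MAC counter resets.
 */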

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
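
/* Illustrative note (not part of the driver): each statistic is stored as a
 * hi/lo pair of u32s with the high word first, so hiref points at the high
 * word and hiref + 1 at the low one.  On 64-bit kernels HILO_U64() combines
 * them into the full value; on 32-bit kernels, where the unsigned long
 * fields of struct net_device_stats only hold 32 bits anyway, the high word
 * is simply dropped.
 */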

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
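
/* Illustrative note (not part of the driver): when several commands have
 * been queued (bp->executer_idx != 0), the block above posts a single
 * "loader" command instead of posting them one by one.  The loader copies
 * the next queued command into DMAE command memory at slot loader_idx + 1,
 * and its completion write (comp_val = 1 to dmae_reg_go_c[loader_idx + 1])
 * fires that slot; each queued command in turn completes by re-arming the
 * loader, whose source address was not given DMAE_CMD_SRC_RESET and so has
 * advanced to the following command.  The chain runs without CPU help until
 * the last command writes DMAE_COMP_VAL to stats_comp.
 */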

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
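
/* Illustrative note (not part of the driver): completion is detected purely
 * by polling the stats_comp word that the final DMAE command overwrites with
 * DMAE_COMP_VAL.  With cnt = 10 and msleep(1) per round this waits on the
 * order of 10 ms (msleep may sleep longer) before complaining, and the
 * function returns 1 either way, so a timeout is treated as best-effort
 * rather than fatal.
 */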

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
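
/* Illustrative note (not part of the driver): the table above is a flat
 * state machine indexed by [current state][event].  Starting DISABLED, a
 * LINK_UP event runs bnx2x_stats_start() and moves to ENABLED; a later STOP
 * runs bnx2x_stats_stop() and returns to DISABLED.  An UPDATE event while
 * DISABLED lands on bnx2x_stats_do_nothing(), so a stray timer tick before
 * link-up is harmless.
 */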

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

a2fbb9ea
ET
4989static void bnx2x_timer(unsigned long data)
4990{
4991 struct bnx2x *bp = (struct bnx2x *) data;
4992
4993 if (!netif_running(bp->dev))
4994 return;
4995
4996 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4997 goto timer_restart;
a2fbb9ea
ET
4998
4999 if (poll) {
5000 struct bnx2x_fastpath *fp = &bp->fp[0];
5001 int rc;
5002
7961f791 5003 bnx2x_tx_int(fp);
a2fbb9ea
ET
5004 rc = bnx2x_rx_int(fp, 1000);
5005 }
5006
34f80b04
EG
5007 if (!BP_NOMCP(bp)) {
5008 int func = BP_FUNC(bp);
a2fbb9ea
ET
5009 u32 drv_pulse;
5010 u32 mcp_pulse;
5011
5012 ++bp->fw_drv_pulse_wr_seq;
5013 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5014 /* TBD - add SYSTEM_TIME */
5015 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5016 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5017
34f80b04 5018 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
5019 MCP_PULSE_SEQ_MASK);
5020 /* The delta between driver pulse and mcp response
5021 * should be 1 (before mcp response) or 0 (after mcp response)
5022 */
5023 if ((drv_pulse != mcp_pulse) &&
5024 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5025 /* someone lost a heartbeat... */
5026 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5027 drv_pulse, mcp_pulse);
5028 }
5029 }
5030
f34d28ea 5031 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5032 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5033
f1410647 5034timer_restart:
5035 mod_timer(&bp->timer, jiffies + bp->current_interval);
5036}
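
/* Reviewer note (added annotation, not original driver code): the
 * heartbeat check above tolerates exactly one outstanding pulse.
 * Assuming the usual 15-bit sequence masks (DRV_PULSE_SEQ_MASK ==
 * MCP_PULSE_SEQ_MASK == 0x7fff), a driver write of 0x7fff answered by
 * an MCP echo of 0x7ffe is still healthy because
 * 0x7fff == ((0x7ffe + 1) & 0x7fff), and the sequence wraps cleanly
 * from 0x7fff to 0x0000; any larger delta logs the BNX2X_ERR() above.
 */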
5037
5038/* end of Statistics */
5039
5040/* nic init */
5041
5042/*
5043 * nic init service functions
5044 */
5045
34f80b04 5046static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 5047{
5048 int port = BP_PORT(bp);
5049
5050 /* "CSTORM" */
5051 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5052 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5053 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5054 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5055 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5056 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5057}
5058
5059static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5060 dma_addr_t mapping, int sb_id)
5061{
5062 int port = BP_PORT(bp);
bb2a0f7a 5063 int func = BP_FUNC(bp);
a2fbb9ea 5064 int index;
34f80b04 5065 u64 section;
5066
5067 /* USTORM */
5068 section = ((u64)mapping) + offsetof(struct host_status_block,
5069 u_status_block);
34f80b04 5070 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 5071
5072 REG_WR(bp, BAR_CSTRORM_INTMEM +
5073 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5074 REG_WR(bp, BAR_CSTRORM_INTMEM +
5075 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5076 U64_HI(section));
5077 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5078 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5079
5080 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5081 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5082 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5083
5084 /* CSTORM */
5085 section = ((u64)mapping) + offsetof(struct host_status_block,
5086 c_status_block);
34f80b04 5087 sb->c_status_block.status_block_id = sb_id;
5088
5089 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5090 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 5091 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5092 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 5093 U64_HI(section));
7a9b2557 5094 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 5095 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5096
5097 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5098 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5099 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5100
5101 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5102}
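
/* Reviewer note (added annotation): each fastpath queue maps its own
 * status block through this helper exactly once at init time, as
 * bnx2x_nic_init() below does per queue:
 *
 *	bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
 *		      fp->sb_id);
 *
 * Both the USTORM and CSTORM sections live in CSTORM memory here, and
 * every index starts with host coalescing disabled (the
 * REG_WR16(..., 1) writes); bnx2x_update_coalesce() later re-enables
 * the indices it actually uses.
 */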
5103
5104static void bnx2x_zero_def_sb(struct bnx2x *bp)
5105{
5106 int func = BP_FUNC(bp);
a2fbb9ea 5107
ca00392c 5108 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5109 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5110 sizeof(struct tstorm_def_status_block)/4);
5111 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5112 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5113 sizeof(struct cstorm_def_status_block_u)/4);
5114 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5115 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5116 sizeof(struct cstorm_def_status_block_c)/4);
5117 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5118 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5119 sizeof(struct xstorm_def_status_block)/4);
5120}
5121
5122static void bnx2x_init_def_sb(struct bnx2x *bp,
5123 struct host_def_status_block *def_sb,
34f80b04 5124 dma_addr_t mapping, int sb_id)
a2fbb9ea 5125{
5126 int port = BP_PORT(bp);
5127 int func = BP_FUNC(bp);
5128 int index, val, reg_offset;
5129 u64 section;
5130
5131 /* ATTN */
5132 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5133 atten_status_block);
34f80b04 5134 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 5135
5136 bp->attn_state = 0;
5137
5138 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5139 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5140
34f80b04 5141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5142 bp->attn_group[index].sig[0] = REG_RD(bp,
5143 reg_offset + 0x10*index);
5144 bp->attn_group[index].sig[1] = REG_RD(bp,
5145 reg_offset + 0x4 + 0x10*index);
5146 bp->attn_group[index].sig[2] = REG_RD(bp,
5147 reg_offset + 0x8 + 0x10*index);
5148 bp->attn_group[index].sig[3] = REG_RD(bp,
5149 reg_offset + 0xc + 0x10*index);
5150 }
5151
5152 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5153 HC_REG_ATTN_MSG0_ADDR_L);
5154
5155 REG_WR(bp, reg_offset, U64_LO(section));
5156 REG_WR(bp, reg_offset + 4, U64_HI(section));
5157
5158 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5159
5160 val = REG_RD(bp, reg_offset);
34f80b04 5161 val |= sb_id;
5162 REG_WR(bp, reg_offset, val);
5163
5164 /* USTORM */
5165 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5166 u_def_status_block);
34f80b04 5167 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 5168
5169 REG_WR(bp, BAR_CSTRORM_INTMEM +
5170 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5171 REG_WR(bp, BAR_CSTRORM_INTMEM +
5172 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 5173 U64_HI(section));
5174 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5175 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5176
5177 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5178 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5179 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5180
5181 /* CSTORM */
5182 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5183 c_def_status_block);
34f80b04 5184 def_sb->c_def_status_block.status_block_id = sb_id;
5185
5186 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5187 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 5188 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 5189 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 5190 U64_HI(section));
5c862848 5191 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 5192 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5193
5194 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5195 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 5196 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5197
5198 /* TSTORM */
5199 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5200 t_def_status_block);
34f80b04 5201 def_sb->t_def_status_block.status_block_id = sb_id;
5202
5203 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5204 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5205 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5206 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5207 U64_HI(section));
5c862848 5208 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 5209 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5210
5211 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5212 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 5213 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5214
5215 /* XSTORM */
5216 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5217 x_def_status_block);
34f80b04 5218 def_sb->x_def_status_block.status_block_id = sb_id;
5219
5220 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5221 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 5222 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 5223 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 5224 U64_HI(section));
5c862848 5225 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 5226 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5227
5228 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5229 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 5230 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 5231
bb2a0f7a 5232 bp->stats_pending = 0;
66e855f3 5233 bp->set_mac_pending = 0;
bb2a0f7a 5234
34f80b04 5235 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5236}
5237
5238static void bnx2x_update_coalesce(struct bnx2x *bp)
5239{
34f80b04 5240 int port = BP_PORT(bp);
5241 int i;
5242
5243 for_each_queue(bp, i) {
34f80b04 5244 int sb_id = bp->fp[i].sb_id;
5245
5246 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5247 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5248 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5249 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5250 bp->rx_ticks/(4 * BNX2X_BTR));
5251 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5252 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5253 U_SB_ETH_RX_CQ_INDEX),
7d323bfd 5254 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5255
5256 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5257 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5258 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5259 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5260 bp->tx_ticks/(4 * BNX2X_BTR));
a2fbb9ea 5261 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5262 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5263 C_SB_ETH_TX_CQ_INDEX),
7d323bfd 5264 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5265 }
5266}
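
/* Reviewer note (added annotation): the timeout fields above are
 * written in units of 4*BNX2X_BTR ticks.  Assuming BNX2X_BTR == 1,
 * rx_ticks = 50 stores a timeout of 12 with coalescing left enabled
 * (disable flag 0), while any rx_ticks below 4 stores 0 and sets the
 * disable flag to 1, so that status-block index interrupts without
 * coalescing.
 */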
5267
5268static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5269 struct bnx2x_fastpath *fp, int last)
5270{
5271 int i;
5272
5273 for (i = 0; i < last; i++) {
5274 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5275 struct sk_buff *skb = rx_buf->skb;
5276
5277 if (skb == NULL) {
5278 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5279 continue;
5280 }
5281
5282 if (fp->tpa_state[i] == BNX2X_TPA_START)
5283 dma_unmap_single(&bp->pdev->dev,
5284 dma_unmap_addr(rx_buf, mapping),
5285 bp->rx_buf_size, DMA_FROM_DEVICE);
5286
5287 dev_kfree_skb(skb);
5288 rx_buf->skb = NULL;
5289 }
5290}
5291
5292static void bnx2x_init_rx_rings(struct bnx2x *bp)
5293{
7a9b2557 5294 int func = BP_FUNC(bp);
5295 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5296 ETH_MAX_AGGREGATION_QUEUES_E1H;
5297 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 5298 int i, j;
a2fbb9ea 5299
87942b46 5300 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5301 DP(NETIF_MSG_IFUP,
5302 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 5303
7a9b2557 5304 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 5305
54b9ddaa 5306 for_each_queue(bp, j) {
32626230 5307 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 5308
32626230 5309 for (i = 0; i < max_agg_queues; i++) {
5310 fp->tpa_pool[i].skb =
5311 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5312 if (!fp->tpa_pool[i].skb) {
5313 BNX2X_ERR("Failed to allocate TPA "
5314 "skb pool for queue[%d] - "
5315 "disabling TPA on this "
5316 "queue!\n", j);
5317 bnx2x_free_tpa_pool(bp, fp, i);
5318 fp->disable_tpa = 1;
5319 break;
5320 }
1a983142 5321 dma_unmap_addr_set((struct sw_rx_bd *)
5322 &bp->fp->tpa_pool[i],
5323 mapping, 0);
5324 fp->tpa_state[i] = BNX2X_TPA_STOP;
5325 }
5326 }
5327 }
5328
54b9ddaa 5329 for_each_queue(bp, j) {
5330 struct bnx2x_fastpath *fp = &bp->fp[j];
5331
5332 fp->rx_bd_cons = 0;
5333 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5334 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5335
5336 /* "next page" elements initialization */
5337 /* SGE ring */
5338 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5339 struct eth_rx_sge *sge;
5340
5341 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5342 sge->addr_hi =
5343 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5344 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5345 sge->addr_lo =
5346 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5347 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5348 }
5349
5350 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5351
7a9b2557 5352 /* RX BD ring */
5353 for (i = 1; i <= NUM_RX_RINGS; i++) {
5354 struct eth_rx_bd *rx_bd;
5355
5356 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5357 rx_bd->addr_hi =
5358 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5359 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5360 rx_bd->addr_lo =
5361 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5362 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5363 }
5364
34f80b04 5365 /* CQ ring */
5366 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5367 struct eth_rx_cqe_next_page *nextpg;
5368
5369 nextpg = (struct eth_rx_cqe_next_page *)
5370 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5371 nextpg->addr_hi =
5372 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5373 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5374 nextpg->addr_lo =
5375 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5376 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5377 }
5378
5379 /* Allocate SGEs and initialize the ring elements */
5380 for (i = 0, ring_prod = 0;
5381 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5382
5383 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5384 BNX2X_ERR("was only able to allocate "
5385 "%d rx sges\n", i);
5386 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5387 /* Cleanup already allocated elements */
5388 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5389 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5390 fp->disable_tpa = 1;
5391 ring_prod = 0;
5392 break;
5393 }
5394 ring_prod = NEXT_SGE_IDX(ring_prod);
5395 }
5396 fp->rx_sge_prod = ring_prod;
5397
5398 /* Allocate BDs and initialize BD ring */
66e855f3 5399 fp->rx_comp_cons = 0;
7a9b2557 5400 cqe_ring_prod = ring_prod = 0;
5401 for (i = 0; i < bp->rx_ring_size; i++) {
5402 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5403 BNX2X_ERR("was only able to allocate "
5404 "%d rx skbs on queue[%d]\n", i, j);
5405 fp->eth_q_stats.rx_skb_alloc_failed++;
5406 break;
5407 }
5408 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5409 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5410 WARN_ON(ring_prod <= i);
5411 }
5412
5413 fp->rx_bd_prod = ring_prod;
5414 /* must not have more available CQEs than BDs */
5415 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5416 cqe_ring_prod);
5417 fp->rx_pkt = fp->rx_calls = 0;
5418
5419 /* Warning!
5420 * this will generate an interrupt (to the TSTORM);
5421 * it must only be done after the chip is initialized
5422 */
5423 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5424 fp->rx_sge_prod);
5425 if (j != 0)
5426 continue;
5427
5428 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5429 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5430 U64_LO(fp->rx_comp_mapping));
5431 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5432 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5433 U64_HI(fp->rx_comp_mapping));
5434 }
5435}
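
/* Reviewer note (added annotation): the "next page" loops above chain
 * the rings page to page through their trailing descriptors, which is
 * why they write elements RX_DESC_CNT*i - 2 and RCQ_DESC_CNT*i - 1.
 * Assuming 4K pages and 8-byte eth_rx_bd entries (RX_DESC_CNT == 512),
 * page 0 keeps slots 0..509 for real BDs and reserves its last two
 * slots for the pointer to page (i % NUM_RX_RINGS), so the final page
 * wraps back to the first and the producer can walk the ring forever.
 */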
5436
5437static void bnx2x_init_tx_ring(struct bnx2x *bp)
5438{
5439 int i, j;
5440
54b9ddaa 5441 for_each_queue(bp, j) {
5442 struct bnx2x_fastpath *fp = &bp->fp[j];
5443
5444 for (i = 1; i <= NUM_TX_RINGS; i++) {
5445 struct eth_tx_next_bd *tx_next_bd =
5446 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5447
ca00392c 5448 tx_next_bd->addr_hi =
a2fbb9ea 5449 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5450 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5451 tx_next_bd->addr_lo =
a2fbb9ea 5452 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5453 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5454 }
5455
5456 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5457 fp->tx_db.data.zero_fill1 = 0;
5458 fp->tx_db.data.prod = 0;
5459
5460 fp->tx_pkt_prod = 0;
5461 fp->tx_pkt_cons = 0;
5462 fp->tx_bd_prod = 0;
5463 fp->tx_bd_cons = 0;
5464 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5465 fp->tx_pkt = 0;
5466 }
5467}
5468
5469static void bnx2x_init_sp_ring(struct bnx2x *bp)
5470{
34f80b04 5471 int func = BP_FUNC(bp);
5472
5473 spin_lock_init(&bp->spq_lock);
5474
5475 bp->spq_left = MAX_SPQ_PENDING;
5476 bp->spq_prod_idx = 0;
5477 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5478 bp->spq_prod_bd = bp->spq;
5479 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5480
34f80b04 5481 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5482 U64_LO(bp->spq_mapping));
5483 REG_WR(bp,
5484 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5485 U64_HI(bp->spq_mapping));
5486
34f80b04 5487 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5488 bp->spq_prod_idx);
5489}
5490
5491static void bnx2x_init_context(struct bnx2x *bp)
5492{
5493 int i;
5494
5495 /* Rx */
5496 for_each_queue(bp, i) {
5497 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5498 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5499 u8 cl_id = fp->cl_id;
a2fbb9ea 5500
5501 context->ustorm_st_context.common.sb_index_numbers =
5502 BNX2X_RX_SB_INDEX_NUM;
0626b899 5503 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5504 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5505 context->ustorm_st_context.common.flags =
5506 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5507 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5508 context->ustorm_st_context.common.statistics_counter_id =
5509 cl_id;
8d9c5f34 5510 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5511 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5512 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5513 bp->rx_buf_size;
34f80b04 5514 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5515 U64_HI(fp->rx_desc_mapping);
34f80b04 5516 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5517 U64_LO(fp->rx_desc_mapping);
5518 if (!fp->disable_tpa) {
5519 context->ustorm_st_context.common.flags |=
ca00392c 5520 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5521 context->ustorm_st_context.common.sge_buff_size =
5522 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5523 (u32)0xffff);
5524 context->ustorm_st_context.common.sge_page_base_hi =
5525 U64_HI(fp->rx_sge_mapping);
5526 context->ustorm_st_context.common.sge_page_base_lo =
5527 U64_LO(fp->rx_sge_mapping);
5528
5529 context->ustorm_st_context.common.max_sges_for_packet =
5530 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5531 context->ustorm_st_context.common.max_sges_for_packet =
5532 ((context->ustorm_st_context.common.
5533 max_sges_for_packet + PAGES_PER_SGE - 1) &
5534 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5535 }
5536
5537 context->ustorm_ag_context.cdu_usage =
5538 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5539 CDU_REGION_NUMBER_UCM_AG,
5540 ETH_CONNECTION_TYPE);
5541
5542 context->xstorm_ag_context.cdu_reserved =
5543 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5544 CDU_REGION_NUMBER_XCM_AG,
5545 ETH_CONNECTION_TYPE);
5546 }
5547
5548 /* Tx */
5549 for_each_queue(bp, i) {
5550 struct bnx2x_fastpath *fp = &bp->fp[i];
5551 struct eth_context *context =
54b9ddaa 5552 bnx2x_sp(bp, context[i].eth);
5553
5554 context->cstorm_st_context.sb_index_number =
5555 C_SB_ETH_TX_CQ_INDEX;
5556 context->cstorm_st_context.status_block_id = fp->sb_id;
5557
5558 context->xstorm_st_context.tx_bd_page_base_hi =
5559 U64_HI(fp->tx_desc_mapping);
5560 context->xstorm_st_context.tx_bd_page_base_lo =
5561 U64_LO(fp->tx_desc_mapping);
ca00392c 5562 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5563 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5564 }
5565}
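
/* Reviewer note (added annotation): the max_sges_for_packet math in
 * bnx2x_init_context() first computes how many SGE pages an MTU-sized
 * frame needs, then rounds that up to a whole SGE entry:
 * ((pages + PAGES_PER_SGE - 1) & ~(PAGES_PER_SGE - 1)) >>
 * PAGES_PER_SGE_SHIFT.  Assuming a 1500-byte MTU, 4K SGE pages and
 * PAGES_PER_SGE == 1, that evaluates to a single SGE per packet.
 */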
5566
5567static void bnx2x_init_ind_table(struct bnx2x *bp)
5568{
26c8fa4d 5569 int func = BP_FUNC(bp);
5570 int i;
5571
555f6c78 5572 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5573 return;
5574
5575 DP(NETIF_MSG_IFUP,
5576 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5577 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5578 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5579 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5580 bp->fp->cl_id + (i % bp->num_queues));
5581}
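
/* Reviewer sketch (added, hypothetical helper not present in the
 * driver): the round-robin mapping programmed by the loop above can be
 * written out as a one-liner; cl_id_base and num_queues stand in for
 * bp->fp->cl_id and bp->num_queues.  With num_queues == 4 the table
 * reads cl_id+0,1,2,3,0,1,2,3,... across all
 * TSTORM_INDIRECTION_TABLE_SIZE slots, spreading RSS-hashed flows
 * evenly over the Rx queues.
 */
static inline u8 bnx2x_ind_table_entry(u8 cl_id_base, int num_queues,
				       int slot)
{
	/* slot'th indirection-table entry: queues are used round-robin */
	return cl_id_base + (slot % num_queues);
}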
5582
5583static void bnx2x_set_client_config(struct bnx2x *bp)
5584{
49d66772 5585 struct tstorm_eth_client_config tstorm_client = {0};
5586 int port = BP_PORT(bp);
5587 int i;
49d66772 5588
e7799c5f 5589 tstorm_client.mtu = bp->dev->mtu;
49d66772 5590 tstorm_client.config_flags =
5591 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5592 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5593#ifdef BCM_VLAN
0c6671b0 5594 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5595 tstorm_client.config_flags |=
8d9c5f34 5596 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5597 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5598 }
5599#endif
5600
5601 for_each_queue(bp, i) {
5602 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5603
49d66772 5604 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5605 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5606 ((u32 *)&tstorm_client)[0]);
5607 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5608 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5609 ((u32 *)&tstorm_client)[1]);
5610 }
5611
5612 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5613 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5614}
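
/* Reviewer note (added annotation): the firmware consumes
 * tstorm_eth_client_config as raw 32-bit words, so the writes above
 * slice the struct into ((u32 *)&tstorm_client)[0] and [1] and store
 * them at consecutive TSTORM offsets; the same word-by-word idiom
 * recurs for the other storm structures in this file.
 */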
5615
5616static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5617{
a2fbb9ea 5618 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5619 int mode = bp->rx_mode;
37b091ba 5620 int mask = bp->rx_mode_cl_mask;
34f80b04 5621 int func = BP_FUNC(bp);
581ce43d 5622 int port = BP_PORT(bp);
a2fbb9ea 5623 int i;
5624 /* All but management unicast packets should pass to the host as well */
5625 u32 llh_mask =
5626 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5627 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5628 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5629 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5630
3196a88a 5631 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5632
5633 switch (mode) {
5634 case BNX2X_RX_MODE_NONE: /* no Rx */
5635 tstorm_mac_filter.ucast_drop_all = mask;
5636 tstorm_mac_filter.mcast_drop_all = mask;
5637 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5638 break;
356e2385 5639
a2fbb9ea 5640 case BNX2X_RX_MODE_NORMAL:
34f80b04 5641 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5642 break;
356e2385 5643
a2fbb9ea 5644 case BNX2X_RX_MODE_ALLMULTI:
5645 tstorm_mac_filter.mcast_accept_all = mask;
5646 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5647 break;
356e2385 5648
a2fbb9ea 5649 case BNX2X_RX_MODE_PROMISC:
5650 tstorm_mac_filter.ucast_accept_all = mask;
5651 tstorm_mac_filter.mcast_accept_all = mask;
5652 tstorm_mac_filter.bcast_accept_all = mask;
5653 /* pass management unicast packets as well */
5654 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5655 break;
356e2385 5656
a2fbb9ea 5657 default:
5658 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5659 break;
5660 }
5661
5662 REG_WR(bp,
5663 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5664 llh_mask);
5665
5666 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5667 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5668 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5669 ((u32 *)&tstorm_mac_filter)[i]);
5670
34f80b04 5671/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5672 ((u32 *)&tstorm_mac_filter)[i]); */
5673 }
a2fbb9ea 5674
5675 if (mode != BNX2X_RX_MODE_NONE)
5676 bnx2x_set_client_config(bp);
5677}
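
/* Reviewer note (added annotation): the switch above maps the driver
 * rx_mode onto the TSTORM accept/drop filter: NONE drops unicast,
 * multicast and broadcast alike; NORMAL only accepts all broadcast
 * (unicast/multicast presumably pass through the MAC CAM filters set
 * up elsewhere); ALLMULTI additionally accepts all multicast; PROMISC
 * accepts everything and also opens the NIG LLH mask so management
 * unicast reaches the host.
 */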
5678
5679static void bnx2x_init_internal_common(struct bnx2x *bp)
5680{
5681 int i;
5682
5683 /* Zero this manually as its initialization is
5684 currently missing in the initTool */
5685 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5686 REG_WR(bp, BAR_USTRORM_INTMEM +
5687 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5688}
5689
5690static void bnx2x_init_internal_port(struct bnx2x *bp)
5691{
5692 int port = BP_PORT(bp);
5693
5694 REG_WR(bp,
5695 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5696 REG_WR(bp,
5697 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5698 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5699 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5700}
5701
5702static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5703{
5704 struct tstorm_eth_function_common_config tstorm_config = {0};
5705 struct stats_indication_flags stats_flags = {0};
5706 int port = BP_PORT(bp);
5707 int func = BP_FUNC(bp);
5708 int i, j;
5709 u32 offset;
471de716 5710 u16 max_agg_size;
5711
5712 if (is_multi(bp)) {
555f6c78 5713 tstorm_config.config_flags = MULTI_FLAGS(bp);
5714 tstorm_config.rss_result_mask = MULTI_MASK;
5715 }
5716
5717 /* Enable TPA if needed */
5718 if (bp->flags & TPA_ENABLE_FLAG)
5719 tstorm_config.config_flags |=
5720 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5721
5722 if (IS_E1HMF(bp))
5723 tstorm_config.config_flags |=
5724 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5725
5726 tstorm_config.leading_client_id = BP_L_ID(bp);
5727
a2fbb9ea 5728 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5729 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5730 (*(u32 *)&tstorm_config));
5731
c14423fe 5732 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5733 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5734 bnx2x_set_storm_rx_mode(bp);
5735
5736 for_each_queue(bp, i) {
5737 u8 cl_id = bp->fp[i].cl_id;
5738
5739 /* reset xstorm per client statistics */
5740 offset = BAR_XSTRORM_INTMEM +
5741 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5742 for (j = 0;
5743 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5744 REG_WR(bp, offset + j*4, 0);
5745
5746 /* reset tstorm per client statistics */
5747 offset = BAR_TSTRORM_INTMEM +
5748 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5749 for (j = 0;
5750 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5751 REG_WR(bp, offset + j*4, 0);
5752
5753 /* reset ustorm per client statistics */
5754 offset = BAR_USTRORM_INTMEM +
5755 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5756 for (j = 0;
5757 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5758 REG_WR(bp, offset + j*4, 0);
5759 }
5760
5761 /* Init statistics related context */
34f80b04 5762 stats_flags.collect_eth = 1;
a2fbb9ea 5763
66e855f3 5764 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5765 ((u32 *)&stats_flags)[0]);
66e855f3 5766 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5767 ((u32 *)&stats_flags)[1]);
5768
66e855f3 5769 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5770 ((u32 *)&stats_flags)[0]);
66e855f3 5771 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5772 ((u32 *)&stats_flags)[1]);
5773
5774 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5775 ((u32 *)&stats_flags)[0]);
5776 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5777 ((u32 *)&stats_flags)[1]);
5778
66e855f3 5779 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5780 ((u32 *)&stats_flags)[0]);
66e855f3 5781 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5782 ((u32 *)&stats_flags)[1]);
5783
5784 REG_WR(bp, BAR_XSTRORM_INTMEM +
5785 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5786 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5787 REG_WR(bp, BAR_XSTRORM_INTMEM +
5788 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5789 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5790
5791 REG_WR(bp, BAR_TSTRORM_INTMEM +
5792 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5793 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5794 REG_WR(bp, BAR_TSTRORM_INTMEM +
5795 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5796 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5797
5798 REG_WR(bp, BAR_USTRORM_INTMEM +
5799 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5800 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5801 REG_WR(bp, BAR_USTRORM_INTMEM +
5802 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5803 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5804
5805 if (CHIP_IS_E1H(bp)) {
5806 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5807 IS_E1HMF(bp));
5808 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5809 IS_E1HMF(bp));
5810 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5811 IS_E1HMF(bp));
5812 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5813 IS_E1HMF(bp));
5814
5815 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5816 bp->e1hov);
34f80b04
EG
5817 }
5818
5819 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5820 max_agg_size =
5821 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5822 SGE_PAGE_SIZE * PAGES_PER_SGE),
5823 (u32)0xffff);
54b9ddaa 5824 for_each_queue(bp, i) {
7a9b2557 5825 struct bnx2x_fastpath *fp = &bp->fp[i];
5826
5827 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5828 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5829 U64_LO(fp->rx_comp_mapping));
5830 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5831 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5832 U64_HI(fp->rx_comp_mapping));
5833
5834 /* Next page */
5835 REG_WR(bp, BAR_USTRORM_INTMEM +
5836 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5837 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5838 REG_WR(bp, BAR_USTRORM_INTMEM +
5839 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5840 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5841
7a9b2557 5842 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5843 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5844 max_agg_size);
5845 }
8a1c38d1 5846
5847 /* dropless flow control */
5848 if (CHIP_IS_E1H(bp)) {
5849 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5850
5851 rx_pause.bd_thr_low = 250;
5852 rx_pause.cqe_thr_low = 250;
5853 rx_pause.cos = 1;
5854 rx_pause.sge_thr_low = 0;
5855 rx_pause.bd_thr_high = 350;
5856 rx_pause.cqe_thr_high = 350;
5857 rx_pause.sge_thr_high = 0;
5858
54b9ddaa 5859 for_each_queue(bp, i) {
5860 struct bnx2x_fastpath *fp = &bp->fp[i];
5861
5862 if (!fp->disable_tpa) {
5863 rx_pause.sge_thr_low = 150;
5864 rx_pause.sge_thr_high = 250;
5865 }
5866
5867
5868 offset = BAR_USTRORM_INTMEM +
5869 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5870 fp->cl_id);
5871 for (j = 0;
5872 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5873 j++)
5874 REG_WR(bp, offset + j*4,
5875 ((u32 *)&rx_pause)[j]);
5876 }
5877 }
5878
5879 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5880
5881 /* Init rate shaping and fairness contexts */
5882 if (IS_E1HMF(bp)) {
5883 int vn;
5884
5885 /* During init there is no active link;
5886 until the link is up, assume a 10Gbps line rate */
5887 bp->link_vars.line_speed = SPEED_10000;
5888 bnx2x_init_port_minmax(bp);
5889
5890 if (!BP_NOMCP(bp))
5891 bp->mf_config =
5892 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5893 bnx2x_calc_vn_weight_sum(bp);
5894
5895 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5896 bnx2x_init_vn_minmax(bp, 2*vn + port);
5897
5898 /* Enable rate shaping and fairness */
b015e3d1 5899 bp->cmng.flags.cmng_enables |=
8a1c38d1 5900 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5901
5902 } else {
5903 /* rate shaping and fairness are disabled */
5904 DP(NETIF_MSG_IFUP,
5905 "single function mode minmax will be disabled\n");
5906 }
5907
5908
5909 /* Store it to internal memory */
5910 if (bp->port.pmf)
5911 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5912 REG_WR(bp, BAR_XSTRORM_INTMEM +
5913 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5914 ((u32 *)(&bp->cmng))[i]);
5915}
5916
5917static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5918{
5919 switch (load_code) {
5920 case FW_MSG_CODE_DRV_LOAD_COMMON:
5921 bnx2x_init_internal_common(bp);
5922 /* no break */
5923
5924 case FW_MSG_CODE_DRV_LOAD_PORT:
5925 bnx2x_init_internal_port(bp);
5926 /* no break */
5927
5928 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5929 bnx2x_init_internal_func(bp);
5930 break;
5931
5932 default:
5933 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5934 break;
5935 }
5936}
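
/* Reviewer note (added annotation): the missing breaks above are
 * deliberate fall-throughs.  A COMMON load code runs common + port +
 * function init, PORT runs port + function init, and FUNCTION runs
 * only the per-function part, matching how much of the chip the MCP
 * handed this driver instance.
 */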
5937
5938static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5939{
5940 int i;
5941
5942 for_each_queue(bp, i) {
5943 struct bnx2x_fastpath *fp = &bp->fp[i];
5944
34f80b04 5945 fp->bp = bp;
a2fbb9ea 5946 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5947 fp->index = i;
34f80b04 5948 fp->cl_id = BP_L_ID(bp) + i;
5949#ifdef BCM_CNIC
5950 fp->sb_id = fp->cl_id + 1;
5951#else
34f80b04 5952 fp->sb_id = fp->cl_id;
37b091ba 5953#endif
34f80b04 5954 DP(NETIF_MSG_IFUP,
5955 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5956 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5957 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5958 fp->sb_id);
5c862848 5959 bnx2x_update_fpsb_idx(fp);
5960 }
5961
5962 /* ensure status block indices were read */
5963 rmb();
5964
5965
5966 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5967 DEF_SB_ID);
5968 bnx2x_update_dsb_idx(bp);
5969 bnx2x_update_coalesce(bp);
5970 bnx2x_init_rx_rings(bp);
5971 bnx2x_init_tx_ring(bp);
5972 bnx2x_init_sp_ring(bp);
5973 bnx2x_init_context(bp);
471de716 5974 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5975 bnx2x_init_ind_table(bp);
5976 bnx2x_stats_init(bp);
5977
5978 /* At this point, we are ready for interrupts */
5979 atomic_set(&bp->intr_sem, 0);
5980
5981 /* flush all before enabling interrupts */
5982 mb();
5983 mmiowb();
5984
615f8fd9 5985 bnx2x_int_enable(bp);
5986
5987 /* Check for SPIO5 */
5988 bnx2x_attn_int_deasserted0(bp,
5989 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5990 AEU_INPUTS_ATTN_BITS_SPIO5);
5991}
5992
5993/* end of nic init */
5994
5995/*
5996 * gzip service functions
5997 */
5998
5999static int bnx2x_gunzip_init(struct bnx2x *bp)
6000{
6001 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6002 &bp->gunzip_mapping, GFP_KERNEL);
6003 if (bp->gunzip_buf == NULL)
6004 goto gunzip_nomem1;
6005
6006 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6007 if (bp->strm == NULL)
6008 goto gunzip_nomem2;
6009
6010 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6011 GFP_KERNEL);
6012 if (bp->strm->workspace == NULL)
6013 goto gunzip_nomem3;
6014
6015 return 0;
6016
6017gunzip_nomem3:
6018 kfree(bp->strm);
6019 bp->strm = NULL;
6020
6021gunzip_nomem2:
6022 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6023 bp->gunzip_mapping);
6024 bp->gunzip_buf = NULL;
6025
6026gunzip_nomem1:
7995c64e 6027 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
6028 return -ENOMEM;
6029}
6030
6031static void bnx2x_gunzip_end(struct bnx2x *bp)
6032{
6033 kfree(bp->strm->workspace);
6034
6035 kfree(bp->strm);
6036 bp->strm = NULL;
6037
6038 if (bp->gunzip_buf) {
6039 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6040 bp->gunzip_mapping);
6041 bp->gunzip_buf = NULL;
6042 }
6043}
6044
94a78b79 6045static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6046{
6047 int n, rc;
6048
6049 /* check gzip header */
6050 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6051 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 6052 return -EINVAL;
94a78b79 6053 }
6054
6055 n = 10;
6056
34f80b04 6057#define FNAME 0x8
6058
6059 if (zbuf[3] & FNAME)
6060 while ((zbuf[n++] != 0) && (n < len));
6061
94a78b79 6062 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6063 bp->strm->avail_in = len - n;
6064 bp->strm->next_out = bp->gunzip_buf;
6065 bp->strm->avail_out = FW_BUF_SIZE;
6066
6067 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6068 if (rc != Z_OK)
6069 return rc;
6070
6071 rc = zlib_inflate(bp->strm, Z_FINISH);
6072 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6073 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6074 bp->strm->msg);
6075
6076 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6077 if (bp->gunzip_outlen & 0x3)
6078 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6079 bp->gunzip_outlen);
6080 bp->gunzip_outlen >>= 2;
6081
6082 zlib_inflateEnd(bp->strm);
6083
6084 if (rc == Z_STREAM_END)
6085 return 0;
6086
6087 return rc;
6088}
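
/* Reviewer note (added annotation): the header handling above follows
 * RFC 1952.  Bytes 0 and 1 must be the gzip magic 0x1f 0x8b and byte 2
 * the deflate method; when the FNAME flag (bit 3 of the flag byte,
 * hence the 0x8 test) is set, the NUL-terminated file name after the
 * fixed 10-byte header is skipped.  zlib is then started with
 * -MAX_WBITS, i.e. on the raw deflate stream without a zlib wrapper.
 */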
6089
6090/* nic load/unload */
6091
6092/*
34f80b04 6093 * General service functions
6094 */
6095
6096/* send a NIG loopback debug packet */
6097static void bnx2x_lb_pckt(struct bnx2x *bp)
6098{
a2fbb9ea 6099 u32 wb_write[3];
6100
6101 /* Ethernet source and destination addresses */
6102 wb_write[0] = 0x55555555;
6103 wb_write[1] = 0x55555555;
34f80b04 6104 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 6105 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6106
6107 /* NON-IP protocol */
6108 wb_write[0] = 0x09000000;
6109 wb_write[1] = 0x55555555;
34f80b04 6110 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 6111 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6112}
6113
6114/* Some of the internal memories
6115 * are not directly readable from the driver,
6116 * so to test them we send debug packets
6117 */
6118static int bnx2x_int_mem_test(struct bnx2x *bp)
6119{
6120 int factor;
6121 int count, i;
6122 u32 val = 0;
6123
ad8d3948 6124 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 6125 factor = 120;
6126 else if (CHIP_REV_IS_EMUL(bp))
6127 factor = 200;
6128 else
a2fbb9ea 6129 factor = 1;
6130
6131 DP(NETIF_MSG_HW, "start part1\n");
6132
6133 /* Disable inputs of parser neighbor blocks */
6134 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6135 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6136 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6137 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6138
6139 /* Write 0 to parser credits for CFC search request */
6140 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6141
6142 /* send Ethernet packet */
6143 bnx2x_lb_pckt(bp);
6144
6145 /* TODO: do we need to reset the NIG statistics here? */
6146 /* Wait until NIG register shows 1 packet of size 0x10 */
6147 count = 1000 * factor;
6148 while (count) {
34f80b04 6149
6150 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6151 val = *bnx2x_sp(bp, wb_data[0]);
6152 if (val == 0x10)
6153 break;
6154
6155 msleep(10);
6156 count--;
6157 }
6158 if (val != 0x10) {
6159 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6160 return -1;
6161 }
6162
6163 /* Wait until PRS register shows 1 packet */
6164 count = 1000 * factor;
6165 while (count) {
6166 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6167 if (val == 1)
6168 break;
6169
6170 msleep(10);
6171 count--;
6172 }
6173 if (val != 0x1) {
6174 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6175 return -2;
6176 }
6177
6178 /* Reset and init BRB, PRS */
34f80b04 6179 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 6180 msleep(50);
34f80b04 6181 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 6182 msleep(50);
6183 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6184 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6185
6186 DP(NETIF_MSG_HW, "part2\n");
6187
6188 /* Disable inputs of parser neighbor blocks */
6189 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6190 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6191 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6192 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6193
6194 /* Write 0 to parser credits for CFC search request */
6195 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6196
6197 /* send 10 Ethernet packets */
6198 for (i = 0; i < 10; i++)
6199 bnx2x_lb_pckt(bp);
6200
6201 /* Wait until NIG register shows 10 + 1
6202 packets totalling 11*0x10 = 0xb0 octets */
6203 count = 1000 * factor;
6204 while (count) {
34f80b04 6205
6206 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6207 val = *bnx2x_sp(bp, wb_data[0]);
6208 if (val == 0xb0)
6209 break;
6210
6211 msleep(10);
6212 count--;
6213 }
6214 if (val != 0xb0) {
6215 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6216 return -3;
6217 }
6218
6219 /* Wait until PRS register shows 2 packets */
6220 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6221 if (val != 2)
6222 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6223
6224 /* Write 1 to parser credits for CFC search request */
6225 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6226
6227 /* Wait until PRS register shows 3 packets */
6228 msleep(10 * factor);
6229 /* now the PRS packet counter should show the 3rd packet */
6230 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6231 if (val != 3)
6232 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6233
6234 /* clear NIG EOP FIFO */
6235 for (i = 0; i < 11; i++)
6236 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6237 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6238 if (val != 1) {
6239 BNX2X_ERR("clear of NIG failed\n");
6240 return -4;
6241 }
6242
6243 /* Reset and init BRB, PRS, NIG */
6244 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6245 msleep(50);
6246 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6247 msleep(50);
6248 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6249 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 6250#ifndef BCM_CNIC
6251 /* set NIC mode */
6252 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6253#endif
6254
6255 /* Enable inputs of parser neighbor blocks */
6256 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6257 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6258 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 6259 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6260
6261 DP(NETIF_MSG_HW, "done\n");
6262
6263 return 0; /* OK */
6264}
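
/* Reviewer note (added annotation): the magic values above fall out of
 * the loopback packets.  One debug packet is 0x10 bytes, so the NIG
 * octet counter must read 0x10 in part 1; the 10 packets of part 2
 * plus the one left from part 1 give 11 * 0x10 = 0xb0.  The parser is
 * deliberately starved by the zeroed CFC search credit and only
 * advances to the third packet once the credit is restored to 1.
 */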
6265
6266static void enable_blocks_attention(struct bnx2x *bp)
6267{
6268 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6269 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6270 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6271 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6272 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6273 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6274 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6275 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6276 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6277/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6278/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6279 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6280 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6281 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6282/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6283/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6284 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6285 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6286 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6287 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6288/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6289/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6290 if (CHIP_REV_IS_FPGA(bp))
6291 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6292 else
6293 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6294 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6295 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6296 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6297/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6298/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6299 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6300 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6301/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6302 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
6303}
6304
6305static const struct {
6306 u32 addr;
6307 u32 mask;
6308} bnx2x_parity_mask[] = {
6309 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6310 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6311 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6312 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6313 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6314 {QM_REG_QM_PRTY_MASK, 0x0},
6315 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6316 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6317 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6318 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6319 {CDU_REG_CDU_PRTY_MASK, 0x0},
6320 {CFC_REG_CFC_PRTY_MASK, 0x0},
6321 {DBG_REG_DBG_PRTY_MASK, 0x0},
6322 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6323 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6324 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6325 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6326 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6327 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6328 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6329 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6330 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6331 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6332 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6333 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6334 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6335 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6336 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6337};
6338
6339static void enable_blocks_parity(struct bnx2x *bp)
6340{
6341 int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6343
6344 for (i = 0; i < mask_arr_len; i++)
6345 REG_WR(bp, bnx2x_parity_mask[i].addr,
6346 bnx2x_parity_mask[i].mask);
6347}
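
/* Reviewer note (added annotation): by the same mask convention as the
 * PBF_REG_PBF_INT_MASK write above (a 1 bit keeps the source masked),
 * a 0x0 entry in bnx2x_parity_mask[] enables every parity source of
 * that block, 0xffffffff leaves a block fully masked, and the partial
 * masks match their per-entry comments, e.g. TSDM 0x18 keeps bits 3-4
 * masked and SRC 0x4 keeps bit 2 masked.
 */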
6348
34f80b04 6349
6350static void bnx2x_reset_common(struct bnx2x *bp)
6351{
6352 /* reset_common */
6353 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6354 0xd3ffff7f);
6355 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6356}
6357
6358static void bnx2x_init_pxp(struct bnx2x *bp)
6359{
6360 u16 devctl;
6361 int r_order, w_order;
6362
6363 pci_read_config_word(bp->pdev,
6364 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6365 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6366 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6367 if (bp->mrrs == -1)
6368 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6369 else {
6370 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6371 r_order = bp->mrrs;
6372 }
6373
6374 bnx2x_init_pxp_arb(bp, r_order, w_order);
6375}
6376
6377static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6378{
6379 u32 val;
6380 u8 port;
6381 u8 is_required = 0;
6382
6383 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6384 SHARED_HW_CFG_FAN_FAILURE_MASK;
6385
6386 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6387 is_required = 1;
6388
6389 /*
6390 * The fan failure mechanism is usually related to the PHY type since
6391 * the power consumption of the board is affected by the PHY. Currently,
6392 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6393 */
6394 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6395 for (port = PORT_0; port < PORT_MAX; port++) {
6396 u32 phy_type =
6397 SHMEM_RD(bp, dev_info.port_hw_config[port].
6398 external_phy_config) &
6399 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6400 is_required |=
6401 ((phy_type ==
6402 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6403 (phy_type ==
6404 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6405 (phy_type ==
6406 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6407 }
6408
6409 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6410
6411 if (is_required == 0)
6412 return;
6413
6414 /* Fan failure is indicated by SPIO 5 */
6415 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6416 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6417
6418 /* set to active low mode */
6419 val = REG_RD(bp, MISC_REG_SPIO_INT);
6420 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6421 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6422 REG_WR(bp, MISC_REG_SPIO_INT, val);
6423
6424 /* enable interrupt to signal the IGU */
6425 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6426 val |= (1 << MISC_REGISTERS_SPIO_5);
6427 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6428}
6429
34f80b04 6430static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6431{
a2fbb9ea 6432 u32 val, i;
6433#ifdef BCM_CNIC
6434 u32 wb_write[2];
6435#endif
a2fbb9ea 6436
34f80b04 6437 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6438
81f75bbf 6439 bnx2x_reset_common(bp);
6440 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6441 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6442
94a78b79 6443 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6444 if (CHIP_IS_E1H(bp))
6445 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6446
6447 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6448 msleep(30);
6449 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6450
94a78b79 6451 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6452 if (CHIP_IS_E1(bp)) {
6453 /* enable HW interrupt from PXP on USDM overflow
6454 bit 16 on INT_MASK_0 */
6455 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6456 }
a2fbb9ea 6457
94a78b79 6458 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6459 bnx2x_init_pxp(bp);
6460
6461#ifdef __BIG_ENDIAN
6462 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6463 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6464 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6465 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6466 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6467 /* make sure this value is 0 */
6468 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6469
6470/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6471 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6472 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6473 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6474 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6475#endif
6476
34f80b04 6477 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6478#ifdef BCM_CNIC
6479 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6480 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6481 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6482#endif
6483
6484 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6485 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6486
6487 /* let the HW do its magic ... */
6488 msleep(100);
6489 /* finish PXP init */
6490 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6491 if (val != 1) {
6492 BNX2X_ERR("PXP2 CFG failed\n");
6493 return -EBUSY;
6494 }
6495 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6496 if (val != 1) {
6497 BNX2X_ERR("PXP2 RD_INIT failed\n");
6498 return -EBUSY;
6499 }
a2fbb9ea 6500
6501 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6502 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6503
94a78b79 6504 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6505
6506 /* clean the DMAE memory */
6507 bp->dmae_ready = 1;
6508 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6509
6510 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6511 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6512 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6513 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6514
6515 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6516 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6517 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6518 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6519
94a78b79 6520 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6521
6522#ifdef BCM_CNIC
6523 wb_write[0] = 0;
6524 wb_write[1] = 0;
6525 for (i = 0; i < 64; i++) {
6526 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6527 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6528
6529 if (CHIP_IS_E1H(bp)) {
6530 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6531 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6532 wb_write, 2);
6533 }
6534 }
6535#endif
6536 /* soft reset pulse */
6537 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6538 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6539
37b091ba 6540#ifdef BCM_CNIC
94a78b79 6541 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6542#endif
a2fbb9ea 6543
94a78b79 6544 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6545 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6546 if (!CHIP_REV_IS_SLOW(bp)) {
6547 /* enable hw interrupt from doorbell Q */
6548 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6549 }
a2fbb9ea 6550
6551 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6552 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6553 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6554#ifndef BCM_CNIC
6555 /* set NIC mode */
6556 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6557#endif
6558 if (CHIP_IS_E1H(bp))
6559 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6560
6561 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6562 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6563 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6564 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6565
6566 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6567 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6568 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6569 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6570
6571 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6572 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6573 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6574 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6575
6576 /* sync semi rtc */
6577 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6578 0x80000000);
6579 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6580 0x80000000);
a2fbb9ea 6581
6582 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6583 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6584 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6585
6586 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6587 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6588 REG_WR(bp, i, 0xc0cac01a);
6589 /* TODO: replace with something meaningful */
6590 }
94a78b79 6591 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6592#ifdef BCM_CNIC
6593 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6594 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6595 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6596 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6597 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6598 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6599 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6600 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6601 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6602 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6603#endif
34f80b04 6604 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6605
6606 if (sizeof(union cdu_context) != 1024)
6607 /* we currently assume that a context is 1024 bytes */
6608 pr_alert("please adjust the size of cdu_context(%ld)\n",
6609 (long)sizeof(union cdu_context));
a2fbb9ea 6610
94a78b79 6611 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6612 val = (4 << 24) + (0 << 12) + 1024;
6613 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6614
94a78b79 6615 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6616 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6617 /* enable context validation interrupt from CFC */
6618 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6619
6620 /* set the thresholds to prevent CFC/CDU race */
6621 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6622
6623 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6624 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6625
94a78b79 6626 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6627 /* Reset PCIE errors for debug */
6628 REG_WR(bp, 0x2814, 0xffffffff);
6629 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6630
94a78b79 6631 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6632 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6633 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6634 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6635
94a78b79 6636 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6637 if (CHIP_IS_E1H(bp)) {
6638 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6639 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6640 }
6641
6642 if (CHIP_REV_IS_SLOW(bp))
6643 msleep(200);
6644
6645 /* finish CFC init */
6646 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6647 if (val != 1) {
6648 BNX2X_ERR("CFC LL_INIT failed\n");
6649 return -EBUSY;
6650 }
6651 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6652 if (val != 1) {
6653 BNX2X_ERR("CFC AC_INIT failed\n");
6654 return -EBUSY;
6655 }
6656 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6657 if (val != 1) {
6658 BNX2X_ERR("CFC CAM_INIT failed\n");
6659 return -EBUSY;
6660 }
6661 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6662
34f80b04
EG
6663 /* read NIG statistic
6664 to see if this is our first up since powerup */
6665 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6666 val = *bnx2x_sp(bp, wb_data[0]);
6667
6668 /* do internal memory self test */
6669 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6670 BNX2X_ERR("internal mem self test failed\n");
6671 return -EBUSY;
6672 }
6673
35b19ba5 6674 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6676 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6677 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6678 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6679 bp->port.need_hw_lock = 1;
6680 break;
6681
34f80b04
EG
6682 default:
6683 break;
6684 }
f1410647 6685
fd4ef40d
EG
6686 bnx2x_setup_fan_failure_detection(bp);
6687
34f80b04
EG
6688 /* clear PXP2 attentions */
6689 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6690
34f80b04 6691 enable_blocks_attention(bp);
72fd0718
VZ
6692 if (CHIP_PARITY_SUPPORTED(bp))
6693 enable_blocks_parity(bp);
a2fbb9ea 6694
6bbca910
YR
6695 if (!BP_NOMCP(bp)) {
6696 bnx2x_acquire_phy_lock(bp);
6697 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6698 bnx2x_release_phy_lock(bp);
6699 } else
6700 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6701
34f80b04
EG
6702 return 0;
6703}
a2fbb9ea 6704
34f80b04
EG
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
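	/*
	 * Worked example (editorial, illustrative only): in SF mode with
	 * mtu = 9000, low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293,
	 * i.e. (24*1024 + 9000*4)/256 rounded up, expressed in 256-byte
	 * BRB blocks, matching the inline formula comment above.
	 */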

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
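	/*
	 * Editorial note (assumption): 9040 bytes appears to be the
	 * 9000-byte MTU plus header room, written above in 16-byte units
	 * (9040/16 = 565); the init credit adds a fixed 553 - 22 = 531
	 * on top of that threshold.
	 */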

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *   bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
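/*
 * Worked example (editorial, illustrative only): for a DMA address of
 * 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567 (address >> 12,
 * low 32 bits) and ONCHIP_ADDR2() yields 0x00100000 (address >> 44 = 0,
 * plus the valid bit at bit 20 of the high word, i.e. bit 52 - the
 * "53rd bit" - of the combined 64-bit value).
 */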

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

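	/*
	 * Per-function ILT layout as programmed above and below (editorial
	 * sketch): lines i .. i + CNIC_ILT_LINES hold CDU contexts; with
	 * BCM_CNIC, the following lines hold the timers, QM and searcher
	 * (SRC) tables, one line each.
	 */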
#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

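	/*
	 * The cases below deliberately fall through (see the "no break"
	 * markers): a COMMON load also performs the PORT and FUNCTION init
	 * stages, and a PORT load also performs the FUNCTION stage.
	 */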
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
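	/*
	 * Editorial note: the loop above stores, in the last 8 bytes of each
	 * 64-byte T2 entry, the physical address of the next entry - chaining
	 * the entries for the searcher (consistent with the FIRSTFREE/LASTFREE
	 * registers programmed in bnx2x_init_func()).
	 */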

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
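	/*
	 * Resulting MSI-X table layout (editorial sketch): entry 0 is the
	 * slowpath vector, entry 1 is CNIC when BCM_CNIC is defined, and
	 * the loop below assigns one entry per fastpath queue.
	 */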
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

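	/*
	 * Byte-order example (editorial, little-endian host, illustrative
	 * only): for the MAC 00:11:22:33:44:55, *(u16 *)&mac[0] reads
	 * 0x1100, so swab16() stores msb_mac_addr = 0x0011; similarly
	 * middle = 0x2233 and lsb = 0x4455.
	 */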
	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if we failed to enable MSI-X due to
		   lack of memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

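		/*
		 * No MCP: track loads ourselves (editorial sketch):
		 * load_count[0] counts functions loaded on the chip and
		 * load_count[1 + port] counts functions on this port; the
		 * first of each triggers the COMMON resp. PORT init stage.
		 */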
		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return; /* void function - cannot return -EBUSY */
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return; /* void function - cannot return -EBUSY */
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

}

static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
				  MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
8402
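/* UNLOAD_RECOVERY skips the chip cleanup (the chip may be in an
 * undefined state after a parity error) and resets the MCP mailbox
 * sequence; all other unload modes go through the full chip cleanup.
 */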
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
        int i;

        if (bp->state == BNX2X_STATE_CLOSED) {
                /* Interface has been removed - nothing to recover */
                bp->recovery_state = BNX2X_RECOVERY_DONE;
                bp->is_leader = 0;
                bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
                smp_wmb();

                return -EINVAL;
        }

#ifdef BCM_CNIC
        bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
        bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

        /* Set "drop all" */
        bp->rx_mode = BNX2X_RX_MODE_NONE;
        bnx2x_set_storm_rx_mode(bp);

        /* Disable HW interrupts, NAPI and Tx */
        bnx2x_netif_stop(bp, 1);

        del_timer_sync(&bp->timer);
        SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
                 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
        bnx2x_stats_handle(bp, STATS_EVENT_STOP);

        /* Release IRQs */
        bnx2x_free_irq(bp, false);

        /* Clean up the chip if needed */
        if (unload_mode != UNLOAD_RECOVERY)
                bnx2x_chip_cleanup(bp, unload_mode);

        bp->port.pmf = 0;

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        /* The last driver to unload must disable "close the gates" if
         * there is no parity attention or "process kill" pending.
         */
        if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
            bnx2x_reset_is_done(bp))
                bnx2x_disable_close_the_gate(bp);

        /* Reset the MCP mailbox sequence if there is an ongoing recovery */
        if (unload_mode == UNLOAD_RECOVERY)
                bp->fw_seq = 0;

        return 0;
}

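/* Gate #2 discards PXP "internal writes" from the host, gate #4
 * discards PXP doorbells, and gate #3 clears bit 0 of the per-port
 * HC_REG_CONFIG register; closing them isolates the chip from the host
 * during a "process kill".
 */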
/* Close gates #2, #3 and #4 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
        u32 val, addr;

        /* Gates #2 and #4a are closed/opened for "not E1" only */
        if (!CHIP_IS_E1(bp)) {
                /* #4 */
                val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
                REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
                       close ? (val | 0x1) : (val & (~(u32)1)));
                /* #2 */
                val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
                REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
                       close ? (val | 0x1) : (val & (~(u32)1)));
        }

        /* #3 */
        addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        val = REG_RD(bp, addr);
        REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

        DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
           close ? "closing" : "opening");
        mmiowb();
}

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
        /* Do some magic... */
        u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
        *magic_val = val & SHARED_MF_CLP_MAGIC;
        MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp        Driver handle.
 * @param magic_val Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
        /* Restore the `magic' bit value... */
        u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
        MF_CFG_WR(bp, shared_mf_config.clp_mb,
                  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp        Driver handle.
 * @param magic_val Old value of the 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
        u32 shmem;
        u32 validity_offset;

        DP(NETIF_MSG_HW, "Starting\n");

        /* Set the `magic' bit in order to save the MF config */
        if (!CHIP_IS_E1(bp))
                bnx2x_clp_reset_prep(bp, magic_val);

        /* Get the shmem offset */
        shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        validity_offset = offsetof(struct shmem_region, validity_map[0]);

        /* Clear the validity map flags */
        if (shmem > 0)
                REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp Driver handle.
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
        /* special handling for emulation and FPGA (wait 10 times longer) */
        if (CHIP_REV_IS_SLOW(bp))
                msleep(MCP_ONE_TIMEOUT*10);
        else
                msleep(MCP_ONE_TIMEOUT);
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
        u32 shmem, cnt, validity_offset, val;
        int rc = 0;

        msleep(100);

        /* Get the shmem offset */
        shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        if (shmem == 0) {
                BNX2X_ERR("Shmem 0 return failure\n");
                rc = -ENOTTY;
                goto exit_lbl;
        }

        validity_offset = offsetof(struct shmem_region, validity_map[0]);

        /* Wait for the MCP to come up */
        for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
                /* TBD: it's best to check the validity map of the last
                 * port; currently checks port 0.
                 */
                val = REG_RD(bp, shmem + validity_offset);
                DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
                   shmem + validity_offset, val);

                /* check that shared memory is valid */
                if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                        break;

                bnx2x_mcp_wait_one(bp);
        }

        DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

        /* Check that shared memory is valid. This indicates that MCP is up. */
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
            (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
                BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
                rc = -ENOTTY;
                goto exit_lbl;
        }

exit_lbl:
        /* Restore the `magic' bit value */
        if (!CHIP_IS_E1(bp))
                bnx2x_clp_reset_done(bp, magic_val);

        return rc;
}

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
        if (!CHIP_IS_E1(bp)) {
                REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
                REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
                REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
                mmiowb();
        }
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
        u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

        not_reset_mask1 =
                MISC_REGISTERS_RESET_REG_1_RST_HC |
                MISC_REGISTERS_RESET_REG_1_RST_PXPV |
                MISC_REGISTERS_RESET_REG_1_RST_PXP;

        not_reset_mask2 =
                MISC_REGISTERS_RESET_REG_2_RST_MDIO |
                MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_RBCN |
                MISC_REGISTERS_RESET_REG_2_RST_GRC |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

        reset_mask1 = 0xffffffff;

        if (CHIP_IS_E1(bp))
                reset_mask2 = 0xffff;
        else
                reset_mask2 = 0x1ffff;

        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               reset_mask1 & (~not_reset_mask1));
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
               reset_mask2 & (~not_reset_mask2));

        barrier();
        mmiowb();

        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
        mmiowb();
}

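/* "Process kill" sequence: drain the PXP (Tetris) buffer, close gates
 * #2-#4, prepare the MCP and PXP for reset, assert the chip reset, then
 * wait for the MCP to come back up and re-open the gates.
 */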
static int bnx2x_process_kill(struct bnx2x *bp)
{
        int cnt = 1000;
        u32 val = 0;
        u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

        /* Empty the Tetris buffer, wait for 1s */
        do {
                sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
                blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
                port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
                port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
                pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
                if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
                    ((port_is_idle_0 & 0x1) == 0x1) &&
                    ((port_is_idle_1 & 0x1) == 0x1) &&
                    (pgl_exp_rom2 == 0xffffffff))
                        break;
                msleep(1);
        } while (cnt-- > 0);

        if (cnt <= 0) {
                DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
                   " are still outstanding read requests after 1s!\n");
                DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
                   " port_is_idle_0=0x%08x,"
                   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
                   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
                   pgl_exp_rom2);
                return -EAGAIN;
        }

        barrier();

        /* Close gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, true);

        /* TBD: Indicate that "process kill" is in progress to MCP */

        /* Clear the "unprepared" bit */
        REG_WR(bp, MISC_REG_UNPREPARED, 0);
        barrier();

        /* Make sure all is written to the chip before the reset */
        mmiowb();

        /* Wait for 1ms to empty the GLUE and PCI-E core queues,
         * PSWHST, GRC and PSWRD Tetris buffer.
         */
        msleep(1);

        /* Prepare for chip reset: */
        /* MCP */
        bnx2x_reset_mcp_prep(bp, &val);

        /* PXP */
        bnx2x_pxp_prep(bp);
        barrier();

        /* reset the chip */
        bnx2x_process_kill_chip_reset(bp);
        barrier();

        /* Recover after reset: */
        /* MCP */
        if (bnx2x_reset_mcp_comp(bp, val))
                return -EAGAIN;

        /* PXP */
        bnx2x_pxp_prep(bp);

        /* Open gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, false);

        /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
         * reset state, re-enable attentions. */

        return 0;
}

static int bnx2x_leader_reset(struct bnx2x *bp)
{
        int rc = 0;

        /* Try to recover after the failure */
        if (bnx2x_process_kill(bp)) {
                printk(KERN_ERR "%s: Something bad happened! Aii!\n",
                       bp->dev->name);
                rc = -EAGAIN;
                goto exit_leader_reset;
        }

        /* Clear the "reset is in progress" bit and update the driver state */
        bnx2x_set_reset_done(bp);
        bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
        bp->is_leader = 0;
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
        smp_wmb();
        return rc;
}

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/* Assumption: runs under rtnl lock. This, together with the fact
 * that it's called only from bnx2x_reset_task(), ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
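/* Recovery state machine: every function enters in RECOVERY_INIT and
 * unloads itself; whoever grabs the leader lock (RESERVED_08) performs
 * the "process kill" in RECOVERY_WAIT once the global load count drops
 * to zero, while non-leaders wait for the reset to complete and then
 * reload. On failure the device is detached until a power cycle.
 */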
static void bnx2x_parity_recover(struct bnx2x *bp)
{
        DP(NETIF_MSG_HW, "Handling parity\n");
        while (1) {
                switch (bp->recovery_state) {
                case BNX2X_RECOVERY_INIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
                        /* Try to get a LEADER_LOCK HW lock */
                        if (bnx2x_trylock_hw_lock(bp,
                                        HW_LOCK_RESOURCE_RESERVED_08))
                                bp->is_leader = 1;

                        /* Stop the driver */
                        /* If the interface has been removed - break */
                        if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
                                return;

                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
                        /* Ensure that "is_leader" and "recovery_state"
                         * updates are seen on other CPUs.
                         */
                        smp_wmb();
                        break;

                case BNX2X_RECOVERY_WAIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
                        if (bp->is_leader) {
                                u32 load_counter = bnx2x_get_load_cnt(bp);
                                if (load_counter) {
                                        /* Wait until all other functions get
                                         * down.
                                         */
                                        schedule_delayed_work(&bp->reset_task,
                                                              HZ/10);
                                        return;
                                } else {
                                        /* If all other functions got down -
                                         * try to bring the chip back to
                                         * normal. In any case it's an exit
                                         * point for a leader.
                                         */
                                        if (bnx2x_leader_reset(bp) ||
                                            bnx2x_nic_load(bp, LOAD_NORMAL)) {
                                                printk(KERN_ERR "%s: Recovery "
                                                "has failed. Power cycle is "
                                                "needed.\n", bp->dev->name);
                                                /* Disconnect this device */
                                                netif_device_detach(bp->dev);
                                                /* Block ifup for all functions
                                                 * of this ASIC until
                                                 * "process kill" or power
                                                 * cycle.
                                                 */
                                                bnx2x_set_reset_in_progress(bp);
                                                /* Shut down the power */
                                                bnx2x_set_power_state(bp,
                                                                  PCI_D3hot);
                                                return;
                                        }

                                        return;
                                }
                        } else { /* non-leader */
                                if (!bnx2x_reset_is_done(bp)) {
                                        /* Try to get a LEADER_LOCK HW lock as
                                         * long as a former leader may have
                                         * been unloaded by the user or
                                         * released the leadership for another
                                         * reason.
                                         */
                                        if (bnx2x_trylock_hw_lock(bp,
                                            HW_LOCK_RESOURCE_RESERVED_08)) {
                                                /* I'm a leader now! Restart
                                                 * the switch case.
                                                 */
                                                bp->is_leader = 1;
                                                break;
                                        }

                                        schedule_delayed_work(&bp->reset_task,
                                                              HZ/10);
                                        return;

                                } else { /* A leader has completed
                                          * the "process kill". It's an exit
                                          * point for a non-leader.
                                          */
                                        bnx2x_nic_load(bp, LOAD_NORMAL);
                                        bp->recovery_state =
                                                BNX2X_RECOVERY_DONE;
                                        smp_wmb();
                                        return;
                                }
                        }
                default:
                        return;
                }
        }
}

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
        struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
        BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
                  " so reset not done to allow debug dump,\n"
         KERN_ERR " you will need to reboot when done\n");
        return;
#endif

        rtnl_lock();

        if (!netif_running(bp->dev))
                goto reset_task_exit;

        if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
                bnx2x_parity_recover(bp);
        else {
                bnx2x_nic_unload(bp, UNLOAD_NORMAL);
                bnx2x_nic_load(bp, LOAD_NORMAL);
        }

reset_task_exit:
        rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

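/* The PGL `pretend' registers let a PCI function issue GRC accesses as
 * if it were another function; bnx2x_undi_int_disable_e1h() below
 * pretends to be function 0 so it can disable the UNDI driver's
 * interrupts in "like-E1" mode.
 */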
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
        switch (func) {
        case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
        case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
        case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
        case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
        case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
        case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
        case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
        case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
        default:
                BNX2X_ERR("Unsupported function index: %d\n", func);
                return (u32)(-1);
        }
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
        u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

        /* Flush all outstanding writes */
        mmiowb();

        /* Pretend to be function 0 */
        REG_WR(bp, reg, 0);
        /* Flush the GRC transaction (in the chip) */
        new_val = REG_RD(bp, reg);
        if (new_val != 0) {
                BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
                          new_val);
                BUG();
        }

        /* From now on we are in the "like-E1" mode */
        bnx2x_int_disable(bp);

        /* Flush all outstanding writes */
        mmiowb();

        /* Restore the original function settings */
        REG_WR(bp, reg, orig_func);
        new_val = REG_RD(bp, reg);
        if (new_val != orig_func) {
                BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
                          orig_func, new_val);
                BUG();
        }
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
        if (CHIP_IS_E1H(bp))
                bnx2x_undi_int_disable_e1h(bp, func);
        else
                bnx2x_int_disable(bp);
}

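/* A pre-boot UNDI driver may have left the device initialized; it can
 * be detected by the doorbell CID offset it programs (0x7). If found,
 * quiesce input traffic, reset the device and hand a clean chip back
 * to the MCP.
 */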
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
        u32 val;

        /* Check if there is any driver already loaded */
        val = REG_RD(bp, MISC_REG_UNPREPARED);
        if (val == 0x1) {
                /* Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
                val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                if (val == 0x7) {
                        u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
                        /* save our func */
                        int func = BP_FUNC(bp);
                        u32 swap_en;
                        u32 swap_val;

                        /* clear the UNDI indication */
                        REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

                        BNX2X_DEV_INFO("UNDI is active! reset device\n");

                        /* try to unload UNDI on port 0 */
                        bp->func = 0;
                        bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
                        reset_code = bnx2x_fw_command(bp, reset_code);

                        /* if UNDI is loaded on the other port */
                        if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

                                /* send "DONE" for the previous unload */
                                bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

                                /* unload UNDI on port 1 */
                                bp->func = 1;
                                bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                                        DRV_MSG_SEQ_NUMBER_MASK);
                                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

                                bnx2x_fw_command(bp, reset_code);
                        }

                        /* now it's safe to release the lock */
                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

                        bnx2x_undi_int_disable(bp, func);

                        /* close input traffic and wait for it */
                        /* Do not rcv packets to BRB */
                        REG_WR(bp,
                              (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
                                             NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
                        /* Do not direct rcv packets that are not for MCP to
                         * the BRB */
                        REG_WR(bp,
                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
                                              NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
                        /* clear AEU */
                        REG_WR(bp,
                             (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                                            MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
                        msleep(10);

                        /* save NIG port swap info */
                        swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
                        swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
                        /* reset device */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
                               0xd3ffffff);
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
                               0x1403);
                        /* take the NIG out of reset and restore swap values */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
                               MISC_REGISTERS_RESET_REG_1_RST_NIG);
                        REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
                        REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

                        /* send unload done to the MCP */
                        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

                        /* restore our func and fw_seq */
                        bp->func = func;
                        bp->fw_seq =
                               (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);

                } else
                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
        }
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
        u32 val, val2, val3, val4, id;
        u16 pmc;

        /* Get the chip revision id and number. */
        /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
        val = REG_RD(bp, MISC_REG_CHIP_NUM);
        id = ((val & 0xffff) << 16);
        val = REG_RD(bp, MISC_REG_CHIP_REV);
        id |= ((val & 0xf) << 12);
        val = REG_RD(bp, MISC_REG_CHIP_METAL);
        id |= ((val & 0xff) << 4);
        val = REG_RD(bp, MISC_REG_BOND_ID);
        id |= (val & 0xf);
        bp->common.chip_id = id;
        bp->link_params.chip_id = bp->common.chip_id;
        BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

        val = (REG_RD(bp, 0x2874) & 0x55);
        if ((bp->common.chip_id & 0x1) ||
            (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
                bp->flags |= ONE_PORT_FLAG;
                BNX2X_DEV_INFO("single port device\n");
        }

        val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
        bp->common.flash_size = (NVRAM_1MB_SIZE <<
                                 (val & MCPR_NVM_CFG4_FLASH_SIZE));
        BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
                       bp->common.flash_size, bp->common.flash_size);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
                       bp->common.shmem_base, bp->common.shmem2_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
            != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
        BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

        bp->link_params.hw_led_mode = ((bp->common.hw_config &
                                        SHARED_HW_CFG_LED_MODE_MASK) >>
                                       SHARED_HW_CFG_LED_MODE_SHIFT);

        bp->link_params.feature_config_flags = 0;
        val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
        if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
                bp->link_params.feature_config_flags |=
                                FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
        else
                bp->link_params.feature_config_flags &=
                                ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

        val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
        bp->common.bc_ver = val;
        BNX2X_DEV_INFO("bc_ver %X\n", val);
        if (val < BNX2X_BC_VER) {
                /* for now only warn
                 * later we might need to enforce this */
                BNX2X_ERR("This driver needs bc_ver %X but found %X,"
                          " please upgrade BC\n", BNX2X_BC_VER, val);
        }
        bp->link_params.feature_config_flags |=
                (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
                FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

        if (BP_E1HVN(bp) == 0) {
                pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
                bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
        } else {
                /* no WOL capability for E1HVN != 0 */
                bp->flags |= NO_WOL_FLAG;
        }
        BNX2X_DEV_INFO("%sWoL capable\n",
                       (bp->flags & NO_WOL_FLAG) ? "not " : "");

        val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
        val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
        val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
        val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

        pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
                                                    u32 switch_cfg)
{
        int port = BP_PORT(bp);
        u32 ext_phy_type;

        switch (switch_cfg) {
        case SWITCH_CFG_1G:
                BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

                ext_phy_type =
                        SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
                switch (ext_phy_type) {
                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10baseT_Half |
                                               SUPPORTED_10baseT_Full |
                                               SUPPORTED_100baseT_Half |
                                               SUPPORTED_100baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_2500baseX_Full |
                                               SUPPORTED_TP |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10baseT_Half |
                                               SUPPORTED_10baseT_Full |
                                               SUPPORTED_100baseT_Half |
                                               SUPPORTED_100baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_TP |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                default:
                        BNX2X_ERR("NVRAM config error. "
                                  "BAD SerDes ext_phy_config 0x%x\n",
                                  bp->link_params.ext_phy_config);
                        return;
                }

                bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
                                           port*0x10);
                BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
                break;

        case SWITCH_CFG_10G:
                BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

                ext_phy_type =
                        XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10baseT_Half |
                                               SUPPORTED_10baseT_Full |
                                               SUPPORTED_100baseT_Half |
                                               SUPPORTED_100baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_2500baseX_Full |
                                               SUPPORTED_10000baseT_Full |
                                               SUPPORTED_TP |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_2500baseX_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_FIBRE |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10000baseT_Full |
                                               SUPPORTED_TP |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
                        BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
                                       ext_phy_type);

                        bp->port.supported |= (SUPPORTED_10baseT_Half |
                                               SUPPORTED_10baseT_Full |
                                               SUPPORTED_100baseT_Half |
                                               SUPPORTED_100baseT_Full |
                                               SUPPORTED_1000baseT_Full |
                                               SUPPORTED_10000baseT_Full |
                                               SUPPORTED_TP |
                                               SUPPORTED_Autoneg |
                                               SUPPORTED_Pause |
                                               SUPPORTED_Asym_Pause);
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
                        BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
                                  bp->link_params.ext_phy_config);
                        break;

                default:
                        BNX2X_ERR("NVRAM config error. "
                                  "BAD XGXS ext_phy_config 0x%x\n",
                                  bp->link_params.ext_phy_config);
                        return;
                }

                bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
                                           port*0x18);
                BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

                break;

        default:
                BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
                          bp->port.link_config);
                return;
        }
        bp->link_params.phy_addr = bp->port.phy_addr;

        /* mask what we support according to speed_cap_mask */
        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
                bp->port.supported &= ~SUPPORTED_10baseT_Half;

        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
                bp->port.supported &= ~SUPPORTED_10baseT_Full;

        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
                bp->port.supported &= ~SUPPORTED_100baseT_Half;

        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
                bp->port.supported &= ~SUPPORTED_100baseT_Full;

        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
                bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
                                        SUPPORTED_1000baseT_Full);

        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
                bp->port.supported &= ~SUPPORTED_2500baseX_Full;

        if (!(bp->link_params.speed_cap_mask &
              PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
                bp->port.supported &= ~SUPPORTED_10000baseT_Full;

        BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

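/* Translate the NVRAM requested link speed/duplex into link_params and
 * the advertised mode bits, falling back to autoneg (with a warning)
 * on an invalid configuration.
 */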
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
        bp->link_params.req_duplex = DUPLEX_FULL;

        switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
        case PORT_FEATURE_LINK_SPEED_AUTO:
                if (bp->port.supported & SUPPORTED_Autoneg) {
                        bp->link_params.req_line_speed = SPEED_AUTO_NEG;
                        bp->port.advertising = bp->port.supported;
                } else {
                        u32 ext_phy_type =
                            XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

                        if ((ext_phy_type ==
                             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
                            (ext_phy_type ==
                             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
                                /* force 10G, no AN */
                                bp->link_params.req_line_speed = SPEED_10000;
                                bp->port.advertising =
                                                (ADVERTISED_10000baseT_Full |
                                                 ADVERTISED_FIBRE);
                                break;
                        }
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  Autoneg not supported\n",
                                  bp->port.link_config);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_10M_FULL:
                if (bp->port.supported & SUPPORTED_10baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_10;
                        bp->port.advertising = (ADVERTISED_10baseT_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_10M_HALF:
                if (bp->port.supported & SUPPORTED_10baseT_Half) {
                        bp->link_params.req_line_speed = SPEED_10;
                        bp->link_params.req_duplex = DUPLEX_HALF;
                        bp->port.advertising = (ADVERTISED_10baseT_Half |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_100M_FULL:
                if (bp->port.supported & SUPPORTED_100baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_100;
                        bp->port.advertising = (ADVERTISED_100baseT_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_100M_HALF:
                if (bp->port.supported & SUPPORTED_100baseT_Half) {
                        bp->link_params.req_line_speed = SPEED_100;
                        bp->link_params.req_duplex = DUPLEX_HALF;
                        bp->port.advertising = (ADVERTISED_100baseT_Half |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_1G:
                if (bp->port.supported & SUPPORTED_1000baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_1000;
                        bp->port.advertising = (ADVERTISED_1000baseT_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_2_5G:
                if (bp->port.supported & SUPPORTED_2500baseX_Full) {
                        bp->link_params.req_line_speed = SPEED_2500;
                        bp->port.advertising = (ADVERTISED_2500baseX_Full |
                                                ADVERTISED_TP);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        case PORT_FEATURE_LINK_SPEED_10G_CX4:
        case PORT_FEATURE_LINK_SPEED_10G_KX4:
        case PORT_FEATURE_LINK_SPEED_10G_KR:
                if (bp->port.supported & SUPPORTED_10000baseT_Full) {
                        bp->link_params.req_line_speed = SPEED_10000;
                        bp->port.advertising = (ADVERTISED_10000baseT_Full |
                                                ADVERTISED_FIBRE);
                } else {
                        BNX2X_ERR("NVRAM config error. "
                                  "Invalid link_config 0x%x"
                                  "  speed_cap_mask 0x%x\n",
                                  bp->port.link_config,
                                  bp->link_params.speed_cap_mask);
                        return;
                }
                break;

        default:
                BNX2X_ERR("NVRAM config error. "
                          "BAD link speed link_config 0x%x\n",
                          bp->port.link_config);
                bp->link_params.req_line_speed = SPEED_AUTO_NEG;
                bp->port.advertising = bp->port.supported;
                break;
        }

        bp->link_params.req_flow_ctrl = (bp->port.link_config &
                                         PORT_FEATURE_FLOW_CONTROL_MASK);
        if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
            !(bp->port.supported & SUPPORTED_Autoneg))
                bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

        BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
                       "  advertising 0x%x\n",
                       bp->link_params.req_line_speed,
                       bp->link_params.req_duplex,
                       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

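/* Pack the two shmem MAC words into a byte array in network order,
 * e.g. mac_hi = 0x0102, mac_lo = 0x03040506 yields 01:02:03:04:05:06.
 */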
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
        mac_hi = cpu_to_be16(mac_hi);
        mac_lo = cpu_to_be32(mac_lo);
        memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
        memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 val, val2;
        u32 config;
        u16 i;
        u32 ext_phy_type;

        bp->link_params.bp = bp;
        bp->link_params.port = port;

        bp->link_params.lane_config =
                SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
        bp->link_params.ext_phy_config =
                SHMEM_RD(bp,
                         dev_info.port_hw_config[port].external_phy_config);
        /* BCM8727_NOC => BCM8727 no over current */
        if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
            PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
                bp->link_params.ext_phy_config &=
                        ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
                bp->link_params.ext_phy_config |=
                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
                bp->link_params.feature_config_flags |=
                        FEATURE_CONFIG_BCM8727_NOC;
        }

        bp->link_params.speed_cap_mask =
                SHMEM_RD(bp,
                         dev_info.port_hw_config[port].speed_capability_mask);

        bp->port.link_config =
                SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

        /* Get the 4 lanes xgxs config rx and tx */
        for (i = 0; i < 2; i++) {
                val = SHMEM_RD(bp,
                           dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
                bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
                bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

                val = SHMEM_RD(bp,
                           dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
                bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
                bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
        }

        /* If the device is capable of WoL, set the default state according
         * to the HW
         */
        config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
        bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
                   (config & PORT_FEATURE_WOL_ENABLED));

        BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
                       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
                       bp->link_params.lane_config,
                       bp->link_params.ext_phy_config,
                       bp->link_params.speed_cap_mask, bp->port.link_config);

        bp->link_params.switch_cfg |= (bp->port.link_config &
                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
        bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

        bnx2x_link_settings_requested(bp);

        /*
         * If connected directly, work with the internal PHY, otherwise, work
         * with the external PHY
         */
        ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
        if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
                bp->mdio.prtad = bp->link_params.phy_addr;

        else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
                 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
                bp->mdio.prtad =
                        XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

        val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
        val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
        val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
        val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
        bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        u32 val, val2;
        int rc = 0;

        bnx2x_get_common_hwinfo(bp);

        bp->e1hov = 0;
        bp->e1hmf = 0;
        if (CHIP_IS_E1H(bp)) {
                bp->mf_config =
                        SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

                val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
                       FUNC_MF_CFG_E1HOV_TAG_MASK);
                if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
                        bp->e1hmf = 1;
                BNX2X_DEV_INFO("%s function mode\n",
                               IS_E1HMF(bp) ? "multi" : "single");

                if (IS_E1HMF(bp)) {
                        val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
                                                                e1hov_tag) &
                               FUNC_MF_CFG_E1HOV_TAG_MASK);
                        if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
                                bp->e1hov = val;
                                BNX2X_DEV_INFO("E1HOV for func %d is %d "
                                               "(0x%04x)\n",
                                               func, bp->e1hov, bp->e1hov);
                        } else {
                                BNX2X_ERR("No valid E1HOV for func %d,"
                                          " aborting\n", func);
                                rc = -EPERM;
                        }
                } else {
                        if (BP_E1HVN(bp)) {
                                BNX2X_ERR("VN %d in single function mode,"
                                          " aborting\n", BP_E1HVN(bp));
                                rc = -EPERM;
                        }
                }
        }

        if (!BP_NOMCP(bp)) {
                bnx2x_get_port_hwinfo(bp);

                bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
                              DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }

        if (IS_E1HMF(bp)) {
                val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
                val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
                if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
                    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
                        bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
                        bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
                        bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
                        bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
                        bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
                        bp->dev->dev_addr[5] = (u8)(val & 0xff);
                        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
                               ETH_ALEN);
                        memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
                               ETH_ALEN);
                }

                return rc;
        }

        if (BP_NOMCP(bp)) {
                /* only supposed to happen on emulation/FPGA */
                BNX2X_ERR("warning: random MAC workaround active\n");
                random_ether_addr(bp->dev->dev_addr);
                memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
        }

        return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);
        int timer_interval;
        int rc;

        /* Disable interrupt handling until HW is initialized */
        atomic_set(&bp->intr_sem, 1);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
        mutex_init(&bp->cnic_mutex);
#endif

        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

        rc = bnx2x_get_hwinfo(bp);

        /* need to reset the chip if UNDI was active */
        if (!BP_NOMCP(bp))
                bnx2x_undi_unload(bp);

        if (CHIP_REV_IS_FPGA(bp))
                pr_err("FPGA detected\n");

        if (BP_NOMCP(bp) && (func == 0))
                pr_err("MCP disabled, must load devices in order!\n");

        /* Set multi queue mode */
        if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
            ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
                pr_err("Multi disabled since int_mode requested is not MSI-X\n");
                multi_mode = ETH_RSS_MODE_DISABLED;
        }
        bp->multi_mode = multi_mode;

        bp->dev->features |= NETIF_F_GRO;

        /* Set TPA flags */
        if (disable_tpa) {
                bp->flags &= ~TPA_ENABLE_FLAG;
                bp->dev->features &= ~NETIF_F_LRO;
        } else {
                bp->flags |= TPA_ENABLE_FLAG;
                bp->dev->features |= NETIF_F_LRO;
        }

        if (CHIP_IS_E1(bp))
                bp->dropless_fc = 0;
        else
                bp->dropless_fc = dropless_fc;

        bp->mrrs = mrrs;

        bp->tx_ring_size = MAX_TX_AVAIL;
        bp->rx_ring_size = MAX_RX_AVAIL;

        bp->rx_csum = 1;

        /* round the default coalescing values (50/25) down to the
         * 4*BNX2X_BTR granularity */
        bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
        bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

        timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
        bp->current_interval = (poll ? poll : timer_interval);

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + bp->current_interval;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = bnx2x_timer;

        return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions are called with rtnl_lock held */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2x *bp = netdev_priv(dev);

        cmd->supported = bp->port.supported;
        cmd->advertising = bp->port.advertising;

        if ((bp->state == BNX2X_STATE_OPEN) &&
            !(bp->flags & MF_FUNC_DIS) &&
            (bp->link_vars.link_up)) {
                cmd->speed = bp->link_vars.line_speed;
                cmd->duplex = bp->link_vars.duplex;
                if (IS_E1HMF(bp)) {
                        u16 vn_max_rate;

                        vn_max_rate =
                                ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
                        if (vn_max_rate < cmd->speed)
                                cmd->speed = vn_max_rate;
                }
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
                u32 ext_phy_type =
                        XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

                switch (ext_phy_type) {
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
                        cmd->port = PORT_FIBRE;
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
                        cmd->port = PORT_TP;
                        break;

                case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
                        BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
                                  bp->link_params.ext_phy_config);
                        break;

                default:
                        DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
                           bp->link_params.ext_phy_config);
                        break;
                }
        } else
                cmd->port = PORT_TP;

        cmd->phy_address = bp->mdio.prtad;
        cmd->transceiver = XCVR_INTERNAL;

        if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
                cmd->autoneg = AUTONEG_ENABLE;
        else
                cmd->autoneg = AUTONEG_DISABLE;

        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;

        DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
           DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
           DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
           DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
           cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
           cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
           cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

        return 0;
}

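/* In E1H multi-function mode the per-function driver does not control
 * the physical link, so the request is silently accepted (returns 0)
 * without touching the link configuration.
 */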
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2x *bp = netdev_priv(dev);
        u32 advertising;

        if (IS_E1HMF(bp))
                return 0;

        DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
           DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
           DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
           DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
           cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
           cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
           cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

        if (cmd->autoneg == AUTONEG_ENABLE) {
                if (!(bp->port.supported & SUPPORTED_Autoneg)) {
                        DP(NETIF_MSG_LINK, "Autoneg not supported\n");
                        return -EINVAL;
                }

                /* advertise the requested speed and duplex if supported */
                cmd->advertising &= bp->port.supported;

                bp->link_params.req_line_speed = SPEED_AUTO_NEG;
                bp->link_params.req_duplex = DUPLEX_FULL;
                bp->port.advertising |= (ADVERTISED_Autoneg |
                                         cmd->advertising);

        } else { /* forced speed */
                /* advertise the requested speed and duplex if supported */
                switch (cmd->speed) {
                case SPEED_10:
                        if (cmd->duplex == DUPLEX_FULL) {
                                if (!(bp->port.supported &
                                      SUPPORTED_10baseT_Full)) {
                                        DP(NETIF_MSG_LINK,
                                           "10M full not supported\n");
                                        return -EINVAL;
                                }

                                advertising = (ADVERTISED_10baseT_Full |
                                               ADVERTISED_TP);
                        } else {
                                if (!(bp->port.supported &
                                      SUPPORTED_10baseT_Half)) {
                                        DP(NETIF_MSG_LINK,
                                           "10M half not supported\n");
                                        return -EINVAL;
                                }

                                advertising = (ADVERTISED_10baseT_Half |
                                               ADVERTISED_TP);
                        }
                        break;

                case SPEED_100:
                        if (cmd->duplex == DUPLEX_FULL) {
                                if (!(bp->port.supported &
                                      SUPPORTED_100baseT_Full)) {
                                        DP(NETIF_MSG_LINK,
                                           "100M full not supported\n");
                                        return -EINVAL;
                                }

                                advertising = (ADVERTISED_100baseT_Full |
                                               ADVERTISED_TP);
                        } else {
                                if (!(bp->port.supported &
                                      SUPPORTED_100baseT_Half)) {
                                        DP(NETIF_MSG_LINK,
                                           "100M half not supported\n");
                                        return -EINVAL;
                                }

                                advertising = (ADVERTISED_100baseT_Half |
                                               ADVERTISED_TP);
                        }
                        break;

                case SPEED_1000:
                        if (cmd->duplex != DUPLEX_FULL) {
                                DP(NETIF_MSG_LINK, "1G half not supported\n");
                                return -EINVAL;
                        }

                        if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
                                DP(NETIF_MSG_LINK, "1G full not supported\n");
                                return -EINVAL;
                        }

                        advertising = (ADVERTISED_1000baseT_Full |
                                       ADVERTISED_TP);
                        break;

                case SPEED_2500:
                        if (cmd->duplex != DUPLEX_FULL) {
                                DP(NETIF_MSG_LINK,
                                   "2.5G half not supported\n");
                                return -EINVAL;
                        }

                        if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
                                DP(NETIF_MSG_LINK,
                                   "2.5G full not supported\n");
                                return -EINVAL;
                        }

                        advertising = (ADVERTISED_2500baseX_Full |
                                       ADVERTISED_TP);
                        break;

                case SPEED_10000:
                        if (cmd->duplex != DUPLEX_FULL) {
                                DP(NETIF_MSG_LINK, "10G half not supported\n");
                                return -EINVAL;
                        }

                        if (!(bp->port.supported &
                              SUPPORTED_10000baseT_Full)) {
                                DP(NETIF_MSG_LINK, "10G full not supported\n");
                                return -EINVAL;
                        }

                        advertising = (ADVERTISED_10000baseT_Full |
                                       ADVERTISED_FIBRE);
                        break;

                default:
                        DP(NETIF_MSG_LINK, "Unsupported speed\n");
                        return -EINVAL;
                }

                bp->link_params.req_line_speed = cmd->speed;
                bp->link_params.req_duplex = cmd->duplex;
                bp->port.advertising = advertising;
        }

        DP(NETIF_MSG_LINK, "req_line_speed %d\n"
           DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
           bp->link_params.req_line_speed, bp->link_params.req_duplex,
           bp->port.advertising);

        if (netif_running(dev)) {
                bnx2x_stats_handle(bp, STATS_EVENT_STOP);
                bnx2x_link_set(bp);
        }

        return 0;
}

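/* The register dump is a dump_hdr followed by every "online" register
 * block for this chip: a plain block contributes its size in dwords,
 * and each wreg entry contributes size * (1 + read_regs_count) dwords;
 * the total is then converted to bytes (*4) plus the header.
 */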
#define IS_E1_ONLINE(info)   (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)  (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        int regdump_len = 0;
        int i;

        if (CHIP_IS_E1(bp)) {
                for (i = 0; i < REGS_COUNT; i++)
                        if (IS_E1_ONLINE(reg_addrs[i].info))
                                regdump_len += reg_addrs[i].size;

                for (i = 0; i < WREGS_COUNT_E1; i++)
                        if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
                                regdump_len += wreg_addrs_e1[i].size *
                                        (1 + wreg_addrs_e1[i].read_regs_count);

        } else { /* E1H */
                for (i = 0; i < REGS_COUNT; i++)
                        if (IS_E1H_ONLINE(reg_addrs[i].info))
                                regdump_len += reg_addrs[i].size;

                for (i = 0; i < WREGS_COUNT_E1H; i++)
                        if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
                                regdump_len += wreg_addrs_e1h[i].size *
                                        (1 + wreg_addrs_e1h[i].read_regs_count);
        }
        regdump_len *= 4;
        regdump_len += sizeof(struct dump_hdr);

        return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
                           struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, j;
        struct bnx2x *bp = netdev_priv(dev);
        struct dump_hdr dump_hdr = {0};

        regs->version = 0;
        memset(p, 0, regs->len);

        if (!netif_running(bp->dev))
                return;

        dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
        dump_hdr.dump_sign = dump_sign_all;
        dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
        dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
        dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
        dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
        dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

        memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
        p += dump_hdr.hdr_size + 1;

        if (CHIP_IS_E1(bp)) {
                for (i = 0; i < REGS_COUNT; i++)
                        if (IS_E1_ONLINE(reg_addrs[i].info))
                                for (j = 0; j < reg_addrs[i].size; j++)
                                        *p++ = REG_RD(bp,
                                                      reg_addrs[i].addr + j*4);

        } else { /* E1H */
                for (i = 0; i < REGS_COUNT; i++)
                        if (IS_E1H_ONLINE(reg_addrs[i].info))
                                for (j = 0; j < reg_addrs[i].size; j++)
                                        *p++ = REG_RD(bp,
                                                      reg_addrs[i].addr + j*4);
        }
}
10152
0d28e49a
EG
10153#define PHY_FW_VER_LEN 10
10154
10155static void bnx2x_get_drvinfo(struct net_device *dev,
10156 struct ethtool_drvinfo *info)
10157{
10158 struct bnx2x *bp = netdev_priv(dev);
10159 u8 phy_fw_ver[PHY_FW_VER_LEN];
10160
10161 strcpy(info->driver, DRV_MODULE_NAME);
10162 strcpy(info->version, DRV_MODULE_VERSION);
10163
10164 phy_fw_ver[0] = '\0';
10165 if (bp->port.pmf) {
10166 bnx2x_acquire_phy_lock(bp);
10167 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10168 (bp->state != BNX2X_STATE_CLOSED),
10169 phy_fw_ver, PHY_FW_VER_LEN);
10170 bnx2x_release_phy_lock(bp);
10171 }
10172
10173 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
10174 (bp->common.bc_ver & 0xff0000) >> 16,
10175 (bp->common.bc_ver & 0xff00) >> 8,
10176 (bp->common.bc_ver & 0xff),
10177 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
10178 strcpy(info->bus_info, pci_name(bp->pdev));
10179 info->n_stats = BNX2X_NUM_STATS;
10180 info->testinfo_len = BNX2X_NUM_TESTS;
10181 info->eedump_len = bp->common.flash_size;
10182 info->regdump_len = bnx2x_get_regs_len(dev);
10183}
10184
a2fbb9ea
ET
10185static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10186{
10187 struct bnx2x *bp = netdev_priv(dev);
10188
10189 if (bp->flags & NO_WOL_FLAG) {
10190 wol->supported = 0;
10191 wol->wolopts = 0;
10192 } else {
10193 wol->supported = WAKE_MAGIC;
10194 if (bp->wol)
10195 wol->wolopts = WAKE_MAGIC;
10196 else
10197 wol->wolopts = 0;
10198 }
10199 memset(&wol->sopass, 0, sizeof(wol->sopass));
10200}
10201
10202static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10203{
10204 struct bnx2x *bp = netdev_priv(dev);
10205
10206 if (wol->wolopts & ~WAKE_MAGIC)
10207 return -EINVAL;
10208
10209 if (wol->wolopts & WAKE_MAGIC) {
10210 if (bp->flags & NO_WOL_FLAG)
10211 return -EINVAL;
10212
10213 bp->wol = 1;
34f80b04 10214 } else
a2fbb9ea 10215 bp->wol = 0;
34f80b04 10216
a2fbb9ea
ET
10217 return 0;
10218}
10219
10220static u32 bnx2x_get_msglevel(struct net_device *dev)
10221{
10222 struct bnx2x *bp = netdev_priv(dev);
10223
7995c64e 10224 return bp->msg_enable;
a2fbb9ea
ET
10225}
10226
10227static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10228{
10229 struct bnx2x *bp = netdev_priv(dev);
10230
10231 if (capable(CAP_NET_ADMIN))
7995c64e 10232 bp->msg_enable = level;
a2fbb9ea
ET
10233}
10234
10235static int bnx2x_nway_reset(struct net_device *dev)
10236{
10237 struct bnx2x *bp = netdev_priv(dev);
10238
34f80b04
EG
10239 if (!bp->port.pmf)
10240 return 0;
a2fbb9ea 10241
34f80b04 10242 if (netif_running(dev)) {
bb2a0f7a 10243 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10244 bnx2x_link_set(bp);
10245 }
a2fbb9ea
ET
10246
10247 return 0;
10248}
10249
ab6ad5a4 10250static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
10251{
10252 struct bnx2x *bp = netdev_priv(dev);
10253
f34d28ea
EG
10254 if (bp->flags & MF_FUNC_DIS)
10255 return 0;
10256
01e53298
NO
10257 return bp->link_vars.link_up;
10258}
10259
a2fbb9ea
ET
10260static int bnx2x_get_eeprom_len(struct net_device *dev)
10261{
10262 struct bnx2x *bp = netdev_priv(dev);
10263
34f80b04 10264 return bp->common.flash_size;
a2fbb9ea
ET
10265}
10266
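/* The NVRAM is shared between the two ports of the device; access is
 * arbitrated through the per-port request/grant bits of
 * MCP_REG_MCPR_NVM_SW_ARB, polled with a timeout that is stretched 100x
 * on slow emulation/FPGA platforms.
 */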
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

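/* Multi-dword transfers are framed with MCPR_NVM_COMMAND_FIRST on the
 * opening dword and MCPR_NVM_COMMAND_LAST on the closing one, so the
 * flash controller can treat the burst as a single sequential access.
 */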
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

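/* Sub-dword writes are done as read-modify-write: fetch the aligned
 * dword, splice the new byte in at BYTE_OFFSET() and write it back.
 */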
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

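/* ethtool -E is overloaded here: the 'PHYP'/'PHYR'/'PHYC' magic values
 * sequence an external PHY (SFX7101) firmware upgrade and are restricted
 * to the PMF; any other magic falls through to a plain NVRAM write.
 */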
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

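/* Register test: write 0x00000000 and then 0xffffffff to each (per-port)
 * register in reg_tbl and check that the masked read-back matches, with
 * the original value restored after every probe.
 */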
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

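/* Loopback test: hand-build a single frame, push it through queue 0's
 * Tx ring, then poll the Rx completion queue and compare the received
 * payload byte-for-byte against what was sent.
 */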
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

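/* Each NVRAM section carries its own CRC, so a section can be verified
 * without knowing where the checksum field sits: running ether_crc_le()
 * over the whole section, stored CRC included, must yield the fixed
 * CRC-32 residual below.
 */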
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

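/* Interrupt test: post a harmless SET_MAC ramrod on the slowpath and
 * wait for its completion, which can only arrive through a working
 * slowpath interrupt.
 */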
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

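/* ethtool self-test entry point: the offline tests (registers, memory,
 * loopback) reload the NIC in diagnostic mode and are skipped entirely
 * in E1H multi-function mode; the online tests run against the live
 * configuration.
 */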
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

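/* The last entry of each RCQ page is a next-page pointer rather than a
 * real completion, so a status-block consumer index that lands on it is
 * bumped past it before being compared with our own consumer.
 */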
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

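/* NAPI poll: drain Tx completions, then Rx up to the budget, and only
 * complete and re-enable the status-block interrupt once both rings are
 * verifiably quiet.
 */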
a2fbb9ea
ET
11819static int bnx2x_poll(struct napi_struct *napi, int budget)
11820{
54b9ddaa 11821 int work_done = 0;
a2fbb9ea
ET
11822 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11823 napi);
11824 struct bnx2x *bp = fp->bp;
a2fbb9ea 11825
54b9ddaa 11826 while (1) {
a2fbb9ea 11827#ifdef BNX2X_STOP_ON_ERROR
54b9ddaa
VZ
11828 if (unlikely(bp->panic)) {
11829 napi_complete(napi);
11830 return 0;
11831 }
a2fbb9ea
ET
11832#endif
11833
54b9ddaa
VZ
11834 if (bnx2x_has_tx_work(fp))
11835 bnx2x_tx_int(fp);
356e2385 11836
54b9ddaa
VZ
11837 if (bnx2x_has_rx_work(fp)) {
11838 work_done += bnx2x_rx_int(fp, budget - work_done);
a2fbb9ea 11839
54b9ddaa
VZ
11840 /* must not complete if we consumed full budget */
11841 if (work_done >= budget)
11842 break;
11843 }
a2fbb9ea 11844
54b9ddaa
VZ
11845 /* Fall out from the NAPI loop if needed */
11846 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11847 bnx2x_update_fpsb_idx(fp);
11848 /* bnx2x_has_rx_work() reads the status block, thus we need
11849 * to ensure that status block indices have been actually read
11850 * (bnx2x_update_fpsb_idx) prior to this check
11851 * (bnx2x_has_rx_work) so that we won't write the "newer"
11852 * value of the status block to IGU (if there was a DMA right
11853 * after bnx2x_has_rx_work and if there is no rmb, the memory
11854 * reading (bnx2x_update_fpsb_idx) may be postponed to right
11855 * before bnx2x_ack_sb). In this case there will never be
11856 * another interrupt until there is another update of the
11857 * status block, while there is still unhandled work.
11858 */
11859 rmb();
a2fbb9ea 11860
54b9ddaa
VZ
11861 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
11862 napi_complete(napi);
11863 /* Re-enable interrupts */
11864 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11865 le16_to_cpu(fp->fp_c_idx),
11866 IGU_INT_NOP, 1);
11867 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11868 le16_to_cpu(fp->fp_u_idx),
11869 IGU_INT_ENABLE, 1);
11870 break;
11871 }
11872 }
a2fbb9ea 11873 }
356e2385 11874
a2fbb9ea
ET
11875 return work_done;
11876}
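#if 0	/* Illustrative sketch, not compiled: the exit path of the poll
	 * loop above, reduced to its skeleton.  The helper name is a
	 * placeholder; the calls are the driver's own.
	 */
static int napi_done_check(struct napi_struct *napi, struct bnx2x_fastpath *fp)
{
	bnx2x_update_fpsb_idx(fp);	/* read status block indices */
	rmb();				/* order that read vs. the re-check */
	if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
		napi_complete(napi);	/* safe: no unhandled work remains */
		return 1;		/* caller re-enables interrupts */
	}
	return 0;			/* new work raced in - keep polling */
}
#endif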
11877
755735eb
EG
11878
11879/* we split the first BD into headers and data BDs
33471629 11880 * to ease the pain of our fellow microcode engineers;
755735eb
EG
11881 * we use one mapping for both BDs.
11882 * So far this has only been observed to happen
11883 * in Other Operating Systems(TM)
11884 */
11885static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11886 struct bnx2x_fastpath *fp,
ca00392c
EG
11887 struct sw_tx_bd *tx_buf,
11888 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
11889 u16 bd_prod, int nbd)
11890{
ca00392c 11891 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
11892 struct eth_tx_bd *d_tx_bd;
11893 dma_addr_t mapping;
11894 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11895
11896 /* first fix first BD */
11897 h_tx_bd->nbd = cpu_to_le16(nbd);
11898 h_tx_bd->nbytes = cpu_to_le16(hlen);
11899
11900 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11901 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11902 h_tx_bd->addr_lo, h_tx_bd->nbd);
11903
11904 /* now get a new data BD
11905 * (after the pbd) and fill it */
11906 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11907 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11908
11909 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11910 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11911
11912 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11913 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11914 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11915
11916 /* this marks the BD as one that has no individual mapping */
11917 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11918
755735eb
EG
11919 DP(NETIF_MSG_TX_QUEUED,
11920 "TSO split data size is %d (%x:%x)\n",
11921 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11922
ca00392c
EG
11923 /* update tx_bd */
11924 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11925
11926 return bd_prod;
11927}
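#if 0	/* Illustrative sketch, not compiled: byte accounting of the
	 * split above with made-up numbers.  One DMA mapping backs both
	 * BDs; the data BD carries no individual mapping of its own
	 * (BNX2X_TSO_SPLIT_BD).
	 */
static void tso_split_example(dma_addr_t mapping)
{
	u16 old_len = 200, hlen = 66;
	dma_addr_t h_addr = mapping;		/* header BD, 66 bytes */
	dma_addr_t d_addr = mapping + hlen;	/* data BD starts past hdrs */
	u16 d_len = old_len - hlen;		/* 200 - 66 = 134 bytes */
}
#endif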
11928
11929static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11930{
11931 if (fix > 0)
11932 csum = (u16) ~csum_fold(csum_sub(csum,
11933 csum_partial(t_header - fix, fix, 0)));
11934
11935 else if (fix < 0)
11936 csum = (u16) ~csum_fold(csum_add(csum,
11937 csum_partial(t_header, -fix, 0)));
11938
11939 return swab16(csum);
11940}
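/* What the fixup above compensates for: the stack's partial checksum
 * starts at skb->csum_start, but the parsing BD wants it computed from
 * the transport header.  'fix' (SKB_CS_OFF(skb) in the caller) is the
 * signed distance between the two start points; the bytes in between
 * are checksummed on their own and folded out of (fix > 0) or into
 * (fix < 0) the running sum, which is then byte-swapped into the
 * layout the BD expects.
 */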
11941
11942static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11943{
11944 u32 rc;
11945
11946 if (skb->ip_summed != CHECKSUM_PARTIAL)
11947 rc = XMIT_PLAIN;
11948
11949 else {
4781bfad 11950 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11951 rc = XMIT_CSUM_V6;
11952 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11953 rc |= XMIT_CSUM_TCP;
11954
11955 } else {
11956 rc = XMIT_CSUM_V4;
11957 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11958 rc |= XMIT_CSUM_TCP;
11959 }
11960 }
11961
11962 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 11963 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
755735eb
EG
11964
11965 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 11966 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
755735eb
EG
11967
11968 return rc;
11969}
11970
632da4d6 11971#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11972/* check if packet requires linearization (packet is too fragmented)
11973 no need to check fragmentation if page size > 8K (there will be no
11974 violation of FW restrictions) */
755735eb
EG
11975static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11976 u32 xmit_type)
11977{
11978 int to_copy = 0;
11979 int hlen = 0;
11980 int first_bd_sz = 0;
11981
11982 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11983 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11984
11985 if (xmit_type & XMIT_GSO) {
11986 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11987 /* Check if LSO packet needs to be copied:
11988 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11989 int wnd_size = MAX_FETCH_BD - 3;
33471629 11990 /* Number of windows to check */
755735eb
EG
11991 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11992 int wnd_idx = 0;
11993 int frag_idx = 0;
11994 u32 wnd_sum = 0;
11995
11996 /* Headers length */
11997 hlen = (int)(skb_transport_header(skb) - skb->data) +
11998 tcp_hdrlen(skb);
11999
12001 /* Amount of data (w/o headers) on the linear part of the SKB */
12001 first_bd_sz = skb_headlen(skb) - hlen;
12002
12003 wnd_sum = first_bd_sz;
12004
12005 /* Calculate the first sum - it's special */
12006 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12007 wnd_sum +=
12008 skb_shinfo(skb)->frags[frag_idx].size;
12009
12010 /* If there was data on linear skb data - check it */
12011 if (first_bd_sz > 0) {
12012 if (unlikely(wnd_sum < lso_mss)) {
12013 to_copy = 1;
12014 goto exit_lbl;
12015 }
12016
12017 wnd_sum -= first_bd_sz;
12018 }
12019
12020 /* Others are easier: run through the frag list and
12021 check all windows */
12022 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12023 wnd_sum +=
12024 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12025
12026 if (unlikely(wnd_sum < lso_mss)) {
12027 to_copy = 1;
12028 break;
12029 }
12030 wnd_sum -=
12031 skb_shinfo(skb)->frags[wnd_idx].size;
12032 }
755735eb
EG
12033 } else {
12034 /* a non-LSO packet that is too fragmented must always
12035 be linearized */
12036 to_copy = 1;
12037 }
12038 }
12039
12040exit_lbl:
12041 if (unlikely(to_copy))
12042 DP(NETIF_MSG_TX_QUEUED,
12043 "Linearization IS REQUIRED for %s packet. "
12044 "num_frags %d hlen %d first_bd_sz %d\n",
12045 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12046 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12047
12048 return to_copy;
12049}
632da4d6 12050#endif
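/* Worked example of the window check above, with made-up numbers:
 * every wnd_size consecutive BDs must carry at least one MSS of
 * payload.  Say wnd_size = 10, lso_mss = 1460, first_bd_sz = 100 and
 * nine 150-byte frags: the first window sums to 100 + 9 * 150 = 1450,
 * which is below 1460, so bnx2x_pkt_req_lin() asks for linearization.
 */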
755735eb
EG
12051
12052/* called with netif_tx_lock
a2fbb9ea 12053 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 12054 * netif_wake_queue()
a2fbb9ea 12055 */
61357325 12056static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
12057{
12058 struct bnx2x *bp = netdev_priv(dev);
54b9ddaa 12059 struct bnx2x_fastpath *fp;
555f6c78 12060 struct netdev_queue *txq;
a2fbb9ea 12061 struct sw_tx_bd *tx_buf;
ca00392c
EG
12062 struct eth_tx_start_bd *tx_start_bd;
12063 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
12064 struct eth_tx_parse_bd *pbd = NULL;
12065 u16 pkt_prod, bd_prod;
755735eb 12066 int nbd, fp_index;
a2fbb9ea 12067 dma_addr_t mapping;
755735eb 12068 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
12069 int i;
12070 u8 hlen = 0;
ca00392c 12071 __le16 pkt_size = 0;
a2fbb9ea
ET
12072
12073#ifdef BNX2X_STOP_ON_ERROR
12074 if (unlikely(bp->panic))
12075 return NETDEV_TX_BUSY;
12076#endif
12077
555f6c78
EG
12078 fp_index = skb_get_queue_mapping(skb);
12079 txq = netdev_get_tx_queue(dev, fp_index);
12080
54b9ddaa 12081 fp = &bp->fp[fp_index];
755735eb 12082
231fd58a 12083 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
54b9ddaa 12084 fp->eth_q_stats.driver_xoff++;
555f6c78 12085 netif_tx_stop_queue(txq);
a2fbb9ea
ET
12086 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12087 return NETDEV_TX_BUSY;
12088 }
12089
755735eb
EG
12090 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
12091 " gso type %x xmit_type %x\n",
12092 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12093 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12094
632da4d6 12095#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
12096 /* First, check if we need to linearize the skb (due to FW
12097 restrictions). No need to check fragmentation if page size > 8K
12098 (there will be no violation of FW restrictions) */
755735eb
EG
12099 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12100 /* Statistics of linearization */
12101 bp->lin_cnt++;
12102 if (skb_linearize(skb) != 0) {
12103 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12104 "silently dropping this SKB\n");
12105 dev_kfree_skb_any(skb);
da5a662a 12106 return NETDEV_TX_OK;
755735eb
EG
12107 }
12108 }
632da4d6 12109#endif
755735eb 12110
a2fbb9ea 12111 /*
755735eb 12112 Please read carefully. First we use one BD which we mark as start,
ca00392c 12113 then we have a parsing info BD (used for TSO or xsum),
755735eb 12114 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
12115 (don't forget to mark the last one as last,
12116 and to unmap only AFTER you write to the BD ...)
755735eb 12117 And above all, all pbd sizes are in words - NOT DWORDS!
a2fbb9ea
ET
12118 */
12119
12120 pkt_prod = fp->tx_pkt_prod++;
755735eb 12121 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 12122
755735eb 12123 /* get a tx_buf and first BD */
a2fbb9ea 12124 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 12125 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 12126
ca00392c
EG
12127 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12128 tx_start_bd->general_data = (UNICAST_ADDRESS <<
12129 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 12130 /* header nbd */
ca00392c 12131 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 12132
755735eb
EG
12133 /* remember the first BD of the packet */
12134 tx_buf->first_bd = fp->tx_bd_prod;
12135 tx_buf->skb = skb;
ca00392c 12136 tx_buf->flags = 0;
a2fbb9ea
ET
12137
12138 DP(NETIF_MSG_TX_QUEUED,
12139 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 12140 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 12141
0c6671b0
EG
12142#ifdef BCM_VLAN
12143 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12144 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
12145 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12146 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 12147 } else
0c6671b0 12148#endif
ca00392c 12149 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 12150
ca00392c
EG
12151 /* turn on parsing and get a BD */
12152 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12153 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 12154
ca00392c 12155 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
12156
12157 if (xmit_type & XMIT_CSUM) {
ca00392c 12158 hlen = (skb_network_header(skb) - skb->data) / 2; /* in 16-bit words */
a2fbb9ea
ET
12159
12160 /* for now NS flag is not used in Linux */
4781bfad
EG
12161 pbd->global_data =
12162 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12163 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 12164
755735eb
EG
12165 pbd->ip_hlen = (skb_transport_header(skb) -
12166 skb_network_header(skb)) / 2;
12167
12168 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 12169
755735eb 12170 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 12171 hlen = hlen*2; /* pbd keeps hlen in 16-bit words; back to bytes */
a2fbb9ea 12172
ca00392c 12173 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
12174
12175 if (xmit_type & XMIT_CSUM_V4)
ca00392c 12176 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
12177 ETH_TX_BD_FLAGS_IP_CSUM;
12178 else
ca00392c
EG
12179 tx_start_bd->bd_flags.as_bitfield |=
12180 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
12181
12182 if (xmit_type & XMIT_CSUM_TCP) {
12183 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12184
12185 } else {
12186 s8 fix = SKB_CS_OFF(skb); /* signed! */
12187
ca00392c 12188 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 12189
755735eb 12190 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
12191 "hlen %d fix %d csum before fix %x\n",
12192 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
12193
12194 /* HW bug: fixup the CSUM */
12195 pbd->tcp_pseudo_csum =
12196 bnx2x_csum_fix(skb_transport_header(skb),
12197 SKB_CS(skb), fix);
12198
12199 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12200 pbd->tcp_pseudo_csum);
12201 }
a2fbb9ea
ET
12202 }
12203
1a983142
FT
12204 mapping = dma_map_single(&bp->pdev->dev, skb->data,
12205 skb_headlen(skb), DMA_TO_DEVICE);
a2fbb9ea 12206
ca00392c
EG
12207 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12208 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12209 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12210 tx_start_bd->nbd = cpu_to_le16(nbd);
12211 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12212 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
12213
12214 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 12215 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
12216 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12217 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12218 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 12219
755735eb 12220 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
12221
12222 DP(NETIF_MSG_TX_QUEUED,
12223 "TSO packet len %d hlen %d total len %d tso size %d\n",
12224 skb->len, hlen, skb_headlen(skb),
12225 skb_shinfo(skb)->gso_size);
12226
ca00392c 12227 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 12228
755735eb 12229 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
12230 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12231 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
12232
12233 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12234 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
12235 pbd->tcp_flags = pbd_tcp_flags(skb);
12236
12237 if (xmit_type & XMIT_GSO_V4) {
12238 pbd->ip_id = swab16(ip_hdr(skb)->id);
12239 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
12240 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12241 ip_hdr(skb)->daddr,
12242 0, IPPROTO_TCP, 0));
755735eb
EG
12243
12244 } else
12245 pbd->tcp_pseudo_csum =
12246 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12247 &ipv6_hdr(skb)->daddr,
12248 0, IPPROTO_TCP, 0));
12249
a2fbb9ea
ET
12250 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12251 }
ca00392c 12252 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 12253
755735eb
EG
12254 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12255 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 12256
755735eb 12257 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
12258 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12259 if (total_pkt_bd == NULL)
12260 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 12261
1a983142
FT
12262 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12263 frag->page_offset,
12264 frag->size, DMA_TO_DEVICE);
a2fbb9ea 12265
ca00392c
EG
12266 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12267 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12268 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12269 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 12270
755735eb 12271 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
12272 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
12273 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12274 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
12275 }
12276
ca00392c 12277 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 12278
a2fbb9ea
ET
12279 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12280
755735eb 12281 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
12282 * if the packet contains or ends with it
12283 */
12284 if (TX_BD_POFF(bd_prod) < nbd)
12285 nbd++;
12286
ca00392c
EG
12287 if (total_pkt_bd != NULL)
12288 total_pkt_bd->total_pkt_bytes = pkt_size;
12289
a2fbb9ea
ET
12290 if (pbd)
12291 DP(NETIF_MSG_TX_QUEUED,
12292 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
12293 " tcp_flags %x xsum %x seq %u hlen %u\n",
12294 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12295 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 12296 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 12297
755735eb 12298 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 12299
58f4c4cf
EG
12300 /*
12301 * Make sure that the BD data is updated before updating the producer
12302 * since FW might read the BD right after the producer is updated.
12303 * This is only applicable for weak-ordered memory model archs such
12304 * as IA-64. The following barrier is also mandatory since FW
12305 * assumes packets must have BDs; a reduced sketch follows this function.
12306 */
12307 wmb();
12308
ca00392c
EG
12309 fp->tx_db.data.prod += nbd;
12310 barrier();
54b9ddaa 12311 DOORBELL(bp, fp->index, fp->tx_db.raw);
a2fbb9ea
ET
12312
12313 mmiowb();
12314
755735eb 12315 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
12316
12317 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 12318 netif_tx_stop_queue(txq);
9baddeb8
SG
12319
12320 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
12321 * ordering of set_bit() in netif_tx_stop_queue() and read of
12322 * fp->tx_bd_cons */
58f4c4cf 12323 smp_mb();
9baddeb8 12324
54b9ddaa 12325 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 12326 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 12327 netif_tx_wake_queue(txq);
a2fbb9ea 12328 }
54b9ddaa 12329 fp->tx_pkt++;
a2fbb9ea
ET
12330
12331 return NETDEV_TX_OK;
12332}
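#if 0	/* Illustrative sketch, not compiled: the producer/doorbell
	 * ordering from the end of bnx2x_start_xmit() above, reduced to
	 * its skeleton.  The helper name is a placeholder.
	 */
static void tx_doorbell_pattern(struct bnx2x *bp, struct bnx2x_fastpath *fp,
				int nbd)
{
	wmb();			/* BD writes visible before the producer */
	fp->tx_db.data.prod += nbd;
	barrier();		/* compiler: data.prod before raw is read */
	DOORBELL(bp, fp->index, fp->tx_db.raw);
	mmiowb();		/* keep MMIO ordered on weakly-ordered archs */
}
#endif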
12333
bb2a0f7a 12334/* called with rtnl_lock */
a2fbb9ea
ET
12335static int bnx2x_open(struct net_device *dev)
12336{
12337 struct bnx2x *bp = netdev_priv(dev);
12338
6eccabb3
EG
12339 netif_carrier_off(dev);
12340
a2fbb9ea
ET
12341 bnx2x_set_power_state(bp, PCI_D0);
12342
72fd0718
VZ
12343 if (!bnx2x_reset_is_done(bp)) {
12344 do {
12345 /* Reset the MCP mailbox sequence if there is an
12346 * ongoing recovery
12347 */
12348 bp->fw_seq = 0;
12349
12350 /* If it's the first function to load and "reset done" is
12351 * still not cleared, the previous recovery flow may never
12352 * have completed. We don't check the attention state here
12353 * because it may have already been cleared by a "common"
12354 * reset, but we shall proceed with "process kill" anyway.
12355 */
12356 if ((bnx2x_get_load_cnt(bp) == 0) &&
12357 bnx2x_trylock_hw_lock(bp,
12358 HW_LOCK_RESOURCE_RESERVED_08) &&
12359 (!bnx2x_leader_reset(bp))) {
12360 DP(NETIF_MSG_HW, "Recovered in open\n");
12361 break;
12362 }
12363
12364 bnx2x_set_power_state(bp, PCI_D3hot);
12365
12366 printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12367 " completed yet. Try again later. If you still see this"
12368 " message after a few retries then a power cycle is"
12369 " required.\n", bp->dev->name);
12370
12371 return -EAGAIN;
12372 } while (0);
12373 }
12374
12375 bp->recovery_state = BNX2X_RECOVERY_DONE;
12376
bb2a0f7a 12377 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
12378}
12379
bb2a0f7a 12380/* called with rtnl_lock */
a2fbb9ea
ET
12381static int bnx2x_close(struct net_device *dev)
12382{
a2fbb9ea
ET
12383 struct bnx2x *bp = netdev_priv(dev);
12384
12385 /* Unload the driver, release IRQs */
bb2a0f7a
YG
12386 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12387 if (atomic_read(&bp->pdev->enable_cnt) == 1)
12388 if (!CHIP_REV_IS_SLOW(bp))
12389 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
12390
12391 return 0;
12392}
12393
f5372251 12394/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
12395static void bnx2x_set_rx_mode(struct net_device *dev)
12396{
12397 struct bnx2x *bp = netdev_priv(dev);
12398 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12399 int port = BP_PORT(bp);
12400
12401 if (bp->state != BNX2X_STATE_OPEN) {
12402 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12403 return;
12404 }
12405
12406 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12407
12408 if (dev->flags & IFF_PROMISC)
12409 rx_mode = BNX2X_RX_MODE_PROMISC;
12410
12411 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
12412 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12413 CHIP_IS_E1(bp)))
34f80b04
EG
12414 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12415
12416 else { /* some multicasts */
12417 if (CHIP_IS_E1(bp)) {
12418 int i, old, offset;
22bedad3 12419 struct netdev_hw_addr *ha;
34f80b04
EG
12420 struct mac_configuration_cmd *config =
12421 bnx2x_sp(bp, mcast_config);
12422
0ddf477b 12423 i = 0;
22bedad3 12424 netdev_for_each_mc_addr(ha, dev) {
34f80b04
EG
12425 config->config_table[i].
12426 cam_entry.msb_mac_addr =
22bedad3 12427 swab16(*(u16 *)&ha->addr[0]);
34f80b04
EG
12428 config->config_table[i].
12429 cam_entry.middle_mac_addr =
22bedad3 12430 swab16(*(u16 *)&ha->addr[2]);
34f80b04
EG
12431 config->config_table[i].
12432 cam_entry.lsb_mac_addr =
22bedad3 12433 swab16(*(u16 *)&ha->addr[4]);
34f80b04
EG
12434 config->config_table[i].cam_entry.flags =
12435 cpu_to_le16(port);
12436 config->config_table[i].
12437 target_table_entry.flags = 0;
ca00392c
EG
12438 config->config_table[i].target_table_entry.
12439 clients_bit_vector =
12440 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
12441 config->config_table[i].
12442 target_table_entry.vlan_id = 0;
12443
12444 DP(NETIF_MSG_IFUP,
12445 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12446 config->config_table[i].
12447 cam_entry.msb_mac_addr,
12448 config->config_table[i].
12449 cam_entry.middle_mac_addr,
12450 config->config_table[i].
12451 cam_entry.lsb_mac_addr);
0ddf477b 12452 i++;
34f80b04 12453 }
8d9c5f34 12454 old = config->hdr.length;
34f80b04
EG
12455 if (old > i) {
12456 for (; i < old; i++) {
12457 if (CAM_IS_INVALID(config->
12458 config_table[i])) {
af246401 12459 /* already invalidated */
34f80b04
EG
12460 break;
12461 }
12462 /* invalidate */
12463 CAM_INVALIDATE(config->
12464 config_table[i]);
12465 }
12466 }
12467
12468 if (CHIP_REV_IS_SLOW(bp))
12469 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12470 else
12471 offset = BNX2X_MAX_MULTICAST*(1 + port);
12472
8d9c5f34 12473 config->hdr.length = i;
34f80b04 12474 config->hdr.offset = offset;
8d9c5f34 12475 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
12476 config->hdr.reserved1 = 0;
12477
e665bfda
MC
12478 bp->set_mac_pending++;
12479 smp_wmb();
12480
34f80b04
EG
12481 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12482 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12483 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12484 0);
12485 } else { /* E1H */
12486 /* Accept one or more multicasts */
22bedad3 12487 struct netdev_hw_addr *ha;
34f80b04
EG
12488 u32 mc_filter[MC_HASH_SIZE];
12489 u32 crc, bit, regidx;
12490 int i;
12491
12492 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12493
22bedad3 12494 netdev_for_each_mc_addr(ha, dev) {
7c510e4b 12495 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
22bedad3 12496 ha->addr);
34f80b04 12497
22bedad3 12498 crc = crc32c_le(0, ha->addr, ETH_ALEN);
34f80b04
EG
12499 bit = (crc >> 24) & 0xff;
12500 regidx = bit >> 5;
12501 bit &= 0x1f;
12502 mc_filter[regidx] |= (1 << bit);
12503 }
12504
12505 for (i = 0; i < MC_HASH_SIZE; i++)
12506 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12507 mc_filter[i]);
12508 }
12509 }
12510
12511 bp->rx_mode = rx_mode;
12512 bnx2x_set_storm_rx_mode(bp);
12513}
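#if 0	/* Illustrative sketch, not compiled: the E1H multicast hash
	 * above, isolated.  The top byte of the little-endian CRC32C of
	 * the MAC picks one of 256 bits spread over MC_HASH_SIZE 32-bit
	 * registers; e.g. crc = 0x9a000000 -> bit 154 -> word 4, bit 26.
	 */
static void mc_hash_set(u32 *mc_filter, u32 crc)
{
	u32 bit = (crc >> 24) & 0xff;	/* 0..255 */
	u32 regidx = bit >> 5;		/* which 32-bit word */

	mc_filter[regidx] |= 1 << (bit & 0x1f);
}
#endif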
12514
12515/* called with rtnl_lock */
a2fbb9ea
ET
12516static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12517{
12518 struct sockaddr *addr = p;
12519 struct bnx2x *bp = netdev_priv(dev);
12520
34f80b04 12521 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
12522 return -EINVAL;
12523
12524 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
12525 if (netif_running(dev)) {
12526 if (CHIP_IS_E1(bp))
e665bfda 12527 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 12528 else
e665bfda 12529 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 12530 }
a2fbb9ea
ET
12531
12532 return 0;
12533}
12534
c18487ee 12535/* called with rtnl_lock */
01cd4528
EG
12536static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12537 int devad, u16 addr)
a2fbb9ea 12538{
01cd4528
EG
12539 struct bnx2x *bp = netdev_priv(netdev);
12540 u16 value;
12541 int rc;
12542 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 12543
01cd4528
EG
12544 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12545 prtad, devad, addr);
a2fbb9ea 12546
01cd4528
EG
12547 if (prtad != bp->mdio.prtad) {
12548 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12549 prtad, bp->mdio.prtad);
12550 return -EINVAL;
12551 }
12552
12553 /* The HW expects different devad if CL22 is used */
12554 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 12555
01cd4528
EG
12556 bnx2x_acquire_phy_lock(bp);
12557 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12558 devad, addr, &value);
12559 bnx2x_release_phy_lock(bp);
12560 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 12561
01cd4528
EG
12562 if (!rc)
12563 rc = value;
12564 return rc;
12565}
a2fbb9ea 12566
01cd4528
EG
12567/* called with rtnl_lock */
12568static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12569 u16 addr, u16 value)
12570{
12571 struct bnx2x *bp = netdev_priv(netdev);
12572 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12573 int rc;
12574
12575 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12576 " value 0x%x\n", prtad, devad, addr, value);
12577
12578 if (prtad != bp->mdio.prtad) {
12579 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12580 prtad, bp->mdio.prtad);
12581 return -EINVAL;
a2fbb9ea
ET
12582 }
12583
01cd4528
EG
12584 /* The HW expects different devad if CL22 is used */
12585 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 12586
01cd4528
EG
12587 bnx2x_acquire_phy_lock(bp);
12588 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12589 devad, addr, value);
12590 bnx2x_release_phy_lock(bp);
12591 return rc;
12592}
c18487ee 12593
01cd4528
EG
12594/* called with rtnl_lock */
12595static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12596{
12597 struct bnx2x *bp = netdev_priv(dev);
12598 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 12599
01cd4528
EG
12600 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12601 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 12602
01cd4528
EG
12603 if (!netif_running(dev))
12604 return -EAGAIN;
12605
12606 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
12607}
12608
34f80b04 12609/* called with rtnl_lock */
a2fbb9ea
ET
12610static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12611{
12612 struct bnx2x *bp = netdev_priv(dev);
34f80b04 12613 int rc = 0;
a2fbb9ea 12614
72fd0718
VZ
12615 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12616 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12617 return -EAGAIN;
12618 }
12619
a2fbb9ea
ET
12620 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12621 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12622 return -EINVAL;
12623
12624 /* This does not race with packet allocation
c14423fe 12625 * because the actual alloc size is
a2fbb9ea
ET
12626 * only updated as part of load
12627 */
12628 dev->mtu = new_mtu;
12629
12630 if (netif_running(dev)) {
34f80b04
EG
12631 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12632 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 12633 }
34f80b04
EG
12634
12635 return rc;
a2fbb9ea
ET
12636}
12637
12638static void bnx2x_tx_timeout(struct net_device *dev)
12639{
12640 struct bnx2x *bp = netdev_priv(dev);
12641
12642#ifdef BNX2X_STOP_ON_ERROR
12643 if (!bp->panic)
12644 bnx2x_panic();
12645#endif
12646 /* This allows the netif to be shut down gracefully before resetting */
72fd0718 12647 schedule_delayed_work(&bp->reset_task, 0);
a2fbb9ea
ET
12648}
12649
12650#ifdef BCM_VLAN
34f80b04 12651/* called with rtnl_lock */
a2fbb9ea
ET
12652static void bnx2x_vlan_rx_register(struct net_device *dev,
12653 struct vlan_group *vlgrp)
12654{
12655 struct bnx2x *bp = netdev_priv(dev);
12656
12657 bp->vlgrp = vlgrp;
0c6671b0
EG
12658
12659 /* Set flags according to the required capabilities */
12660 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12661
12662 if (dev->features & NETIF_F_HW_VLAN_TX)
12663 bp->flags |= HW_VLAN_TX_FLAG;
12664
12665 if (dev->features & NETIF_F_HW_VLAN_RX)
12666 bp->flags |= HW_VLAN_RX_FLAG;
12667
a2fbb9ea 12668 if (netif_running(dev))
49d66772 12669 bnx2x_set_client_config(bp);
a2fbb9ea 12670}
34f80b04 12671
a2fbb9ea
ET
12672#endif
12673
257ddbda 12674#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
12675static void poll_bnx2x(struct net_device *dev)
12676{
12677 struct bnx2x *bp = netdev_priv(dev);
12678
12679 disable_irq(bp->pdev->irq);
12680 bnx2x_interrupt(bp->pdev->irq, dev);
12681 enable_irq(bp->pdev->irq);
12682}
12683#endif
12684
c64213cd
SH
12685static const struct net_device_ops bnx2x_netdev_ops = {
12686 .ndo_open = bnx2x_open,
12687 .ndo_stop = bnx2x_close,
12688 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 12689 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
12690 .ndo_set_mac_address = bnx2x_change_mac_addr,
12691 .ndo_validate_addr = eth_validate_addr,
12692 .ndo_do_ioctl = bnx2x_ioctl,
12693 .ndo_change_mtu = bnx2x_change_mtu,
12694 .ndo_tx_timeout = bnx2x_tx_timeout,
12695#ifdef BCM_VLAN
12696 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
12697#endif
257ddbda 12698#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
12699 .ndo_poll_controller = poll_bnx2x,
12700#endif
12701};
12702
34f80b04
EG
12703static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12704 struct net_device *dev)
a2fbb9ea
ET
12705{
12706 struct bnx2x *bp;
12707 int rc;
12708
12709 SET_NETDEV_DEV(dev, &pdev->dev);
12710 bp = netdev_priv(dev);
12711
34f80b04
EG
12712 bp->dev = dev;
12713 bp->pdev = pdev;
a2fbb9ea 12714 bp->flags = 0;
34f80b04 12715 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
12716
12717 rc = pci_enable_device(pdev);
12718 if (rc) {
7995c64e 12719 pr_err("Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
12720 goto err_out;
12721 }
12722
12723 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7995c64e 12724 pr_err("Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
12725 rc = -ENODEV;
12726 goto err_out_disable;
12727 }
12728
12729 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7995c64e 12730 pr_err("Cannot find second PCI device base address, aborting\n");
a2fbb9ea
ET
12731 rc = -ENODEV;
12732 goto err_out_disable;
12733 }
12734
34f80b04
EG
12735 if (atomic_read(&pdev->enable_cnt) == 1) {
12736 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12737 if (rc) {
7995c64e 12738 pr_err("Cannot obtain PCI resources, aborting\n");
34f80b04
EG
12739 goto err_out_disable;
12740 }
a2fbb9ea 12741
34f80b04
EG
12742 pci_set_master(pdev);
12743 pci_save_state(pdev);
12744 }
a2fbb9ea
ET
12745
12746 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12747 if (bp->pm_cap == 0) {
7995c64e 12748 pr_err("Cannot find power management capability, aborting\n");
a2fbb9ea
ET
12749 rc = -EIO;
12750 goto err_out_release;
12751 }
12752
12753 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12754 if (bp->pcie_cap == 0) {
7995c64e 12755 pr_err("Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
12756 rc = -EIO;
12757 goto err_out_release;
12758 }
12759
1a983142 12760 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 12761 bp->flags |= USING_DAC_FLAG;
1a983142
FT
12762 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12763 pr_err("dma_set_coherent_mask failed, aborting\n");
a2fbb9ea
ET
12764 rc = -EIO;
12765 goto err_out_release;
12766 }
12767
1a983142 12768 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7995c64e 12769 pr_err("System does not support DMA, aborting\n");
a2fbb9ea
ET
12770 rc = -EIO;
12771 goto err_out_release;
12772 }
12773
34f80b04
EG
12774 dev->mem_start = pci_resource_start(pdev, 0);
12775 dev->base_addr = dev->mem_start;
12776 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
12777
12778 dev->irq = pdev->irq;
12779
275f165f 12780 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 12781 if (!bp->regview) {
7995c64e 12782 pr_err("Cannot map register space, aborting\n");
a2fbb9ea
ET
12783 rc = -ENOMEM;
12784 goto err_out_release;
12785 }
12786
34f80b04
EG
12787 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12788 min_t(u64, BNX2X_DB_SIZE,
12789 pci_resource_len(pdev, 2)));
a2fbb9ea 12790 if (!bp->doorbells) {
7995c64e 12791 pr_err("Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
12792 rc = -ENOMEM;
12793 goto err_out_unmap;
12794 }
12795
12796 bnx2x_set_power_state(bp, PCI_D0);
12797
34f80b04
EG
12798 /* clean indirect addresses */
12799 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12800 PCICFG_VENDOR_ID_OFFSET);
12801 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12802 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12803 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12804 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 12805
72fd0718
VZ
12806 /* Reset the load counter */
12807 bnx2x_clear_load_cnt(bp);
12808
34f80b04 12809 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 12810
c64213cd 12811 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 12812 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
12813 dev->features |= NETIF_F_SG;
12814 dev->features |= NETIF_F_HW_CSUM;
12815 if (bp->flags & USING_DAC_FLAG)
12816 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
12817 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12818 dev->features |= NETIF_F_TSO6;
34f80b04
EG
12819#ifdef BCM_VLAN
12820 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 12821 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
12822
12823 dev->vlan_features |= NETIF_F_SG;
12824 dev->vlan_features |= NETIF_F_HW_CSUM;
12825 if (bp->flags & USING_DAC_FLAG)
12826 dev->vlan_features |= NETIF_F_HIGHDMA;
12827 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
12828 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 12829#endif
a2fbb9ea 12830
01cd4528
EG
12831 /* get_port_hwinfo() will set prtad and mmds properly */
12832 bp->mdio.prtad = MDIO_PRTAD_NONE;
12833 bp->mdio.mmds = 0;
12834 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12835 bp->mdio.dev = dev;
12836 bp->mdio.mdio_read = bnx2x_mdio_read;
12837 bp->mdio.mdio_write = bnx2x_mdio_write;
12838
a2fbb9ea
ET
12839 return 0;
12840
12841err_out_unmap:
12842 if (bp->regview) {
12843 iounmap(bp->regview);
12844 bp->regview = NULL;
12845 }
a2fbb9ea
ET
12846 if (bp->doorbells) {
12847 iounmap(bp->doorbells);
12848 bp->doorbells = NULL;
12849 }
12850
12851err_out_release:
34f80b04
EG
12852 if (atomic_read(&pdev->enable_cnt) == 1)
12853 pci_release_regions(pdev);
a2fbb9ea
ET
12854
12855err_out_disable:
12856 pci_disable_device(pdev);
12857 pci_set_drvdata(pdev, NULL);
12858
12859err_out:
12860 return rc;
12861}
12862
37f9ce62
EG
12863static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
12864 int *width, int *speed)
25047950
ET
12865{
12866 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
12867
37f9ce62 12868 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 12869
37f9ce62
EG
12870 /* return value of 1=2.5GHz 2=5GHz */
12871 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 12872}
37f9ce62 12873
94a78b79
VZ
12874static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12875{
37f9ce62 12876 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
12877 struct bnx2x_fw_file_hdr *fw_hdr;
12878 struct bnx2x_fw_file_section *sections;
94a78b79 12879 u32 offset, len, num_ops;
37f9ce62 12880 u16 *ops_offsets;
94a78b79 12881 int i;
37f9ce62 12882 const u8 *fw_ver;
94a78b79
VZ
12883
12884 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12885 return -EINVAL;
12886
12887 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12888 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12889
12890 /* Make sure none of the offsets and sizes make us read beyond
12891 * the end of the firmware data */
12892 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12893 offset = be32_to_cpu(sections[i].offset);
12894 len = be32_to_cpu(sections[i].len);
12895 if (offset + len > firmware->size) {
7995c64e 12896 pr_err("Section %d length is out of bounds\n", i);
94a78b79
VZ
12897 return -EINVAL;
12898 }
12899 }
12900
12901 /* Likewise for the init_ops offsets */
12902 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12903 ops_offsets = (u16 *)(firmware->data + offset);
12904 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12905
12906 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12907 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7995c64e 12908 pr_err("Section offset %d is out of bounds\n", i);
94a78b79
VZ
12909 return -EINVAL;
12910 }
12911 }
12912
12913 /* Check FW version */
12914 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12915 fw_ver = firmware->data + offset;
12916 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12917 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12918 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12919 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7995c64e 12920 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
12921 fw_ver[0], fw_ver[1], fw_ver[2],
12922 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12923 BCM_5710_FW_MINOR_VERSION,
12924 BCM_5710_FW_REVISION_VERSION,
12925 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 12926 return -EINVAL;
94a78b79
VZ
12927 }
12928
12929 return 0;
12930}
12931
ab6ad5a4 12932static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12933{
ab6ad5a4
EG
12934 const __be32 *source = (const __be32 *)_source;
12935 u32 *target = (u32 *)_target;
94a78b79 12936 u32 i;
94a78b79
VZ
12937
12938 for (i = 0; i < n/4; i++)
12939 target[i] = be32_to_cpu(source[i]);
12940}
12941
12942/*
12943 Ops array is stored in the following format:
12944 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12945 */
ab6ad5a4 12946static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 12947{
ab6ad5a4
EG
12948 const __be32 *source = (const __be32 *)_source;
12949 struct raw_op *target = (struct raw_op *)_target;
94a78b79 12950 u32 i, j, tmp;
94a78b79 12951
ab6ad5a4 12952 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
12953 tmp = be32_to_cpu(source[j]);
12954 target[i].op = (tmp >> 24) & 0xff;
12955 target[i].offset = tmp & 0xffffff;
12956 target[i].raw_data = be32_to_cpu(source[j+1]);
12957 }
12958}
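#if 0	/* Illustrative sketch, not compiled: unpacking a single raw op
	 * in the format described above.  E.g. a first big-endian word of
	 * 0x02001020 yields op = 0x02 and offset = 0x001020; the second
	 * word is simply byte-swapped into raw_data.
	 */
static void prep_one_op(struct raw_op *op, const __be32 src[2])
{
	u32 tmp = be32_to_cpu(src[0]);

	op->op = (tmp >> 24) & 0xff;	/* top byte */
	op->offset = tmp & 0xffffff;	/* low 24 bits */
	op->raw_data = be32_to_cpu(src[1]);
}
#endif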
ab6ad5a4
EG
12959
12960static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12961{
ab6ad5a4
EG
12962 const __be16 *source = (const __be16 *)_source;
12963 u16 *target = (u16 *)_target;
94a78b79 12964 u32 i;
94a78b79
VZ
12965
12966 for (i = 0; i < n/2; i++)
12967 target[i] = be16_to_cpu(source[i]);
12968}
12969
7995c64e
JP
12970#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12971do { \
12972 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12973 bp->arr = kmalloc(len, GFP_KERNEL); \
12974 if (!bp->arr) { \
12975 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
12976 goto lbl; \
12977 } \
12978 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12979 (u8 *)bp->arr, len); \
12980} while (0)
94a78b79 12981
94a78b79
VZ
12982static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12983{
45229b42 12984 const char *fw_file_name;
94a78b79 12985 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 12986 int rc;
94a78b79 12987
94a78b79 12988 if (CHIP_IS_E1(bp))
45229b42 12989 fw_file_name = FW_FILE_NAME_E1;
94a78b79 12990 else
45229b42 12991 fw_file_name = FW_FILE_NAME_E1H;
94a78b79 12992
7995c64e 12993 pr_info("Loading %s\n", fw_file_name);
94a78b79
VZ
12994
12995 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12996 if (rc) {
7995c64e 12997 pr_err("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
12998 goto request_firmware_exit;
12999 }
13000
13001 rc = bnx2x_check_firmware(bp);
13002 if (rc) {
7995c64e 13003 pr_err("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
13004 goto request_firmware_exit;
13005 }
13006
13007 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13008
13009 /* Initialize the pointers to the init arrays */
13010 /* Blob */
13011 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13012
13013 /* Opcodes */
13014 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13015
13016 /* Offsets */
ab6ad5a4
EG
13017 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13018 be16_to_cpu_n);
94a78b79
VZ
13019
13020 /* STORMs firmware */
573f2035
EG
13021 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13022 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13023 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13024 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13025 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13026 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13027 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13028 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13029 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13030 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13031 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13032 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13033 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13034 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13035 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13036 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
13037
13038 return 0;
ab6ad5a4 13039
94a78b79
VZ
13040init_offsets_alloc_err:
13041 kfree(bp->init_ops);
13042init_ops_alloc_err:
13043 kfree(bp->init_data);
13044request_firmware_exit:
13045 release_firmware(bp->firmware);
13046
13047 return rc;
13048}
13049
13050
a2fbb9ea
ET
13051static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13052 const struct pci_device_id *ent)
13053{
a2fbb9ea
ET
13054 struct net_device *dev = NULL;
13055 struct bnx2x *bp;
37f9ce62 13056 int pcie_width, pcie_speed;
25047950 13057 int rc;
a2fbb9ea 13058
a2fbb9ea 13059 /* dev zeroed in init_etherdev */
555f6c78 13060 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04 13061 if (!dev) {
7995c64e 13062 pr_err("Cannot allocate net device\n");
a2fbb9ea 13063 return -ENOMEM;
34f80b04 13064 }
a2fbb9ea 13065
a2fbb9ea 13066 bp = netdev_priv(dev);
7995c64e 13067 bp->msg_enable = debug;
a2fbb9ea 13068
df4770de
EG
13069 pci_set_drvdata(pdev, dev);
13070
34f80b04 13071 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
13072 if (rc < 0) {
13073 free_netdev(dev);
13074 return rc;
13075 }
13076
34f80b04 13077 rc = bnx2x_init_bp(bp);
693fc0d1
EG
13078 if (rc)
13079 goto init_one_exit;
13080
94a78b79
VZ
13081 /* Set init arrays */
13082 rc = bnx2x_init_firmware(bp, &pdev->dev);
13083 if (rc) {
7995c64e 13084 pr_err("Error loading firmware\n");
94a78b79
VZ
13085 goto init_one_exit;
13086 }
13087
693fc0d1 13088 rc = register_netdev(dev);
34f80b04 13089 if (rc) {
693fc0d1 13090 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
13091 goto init_one_exit;
13092 }
13093
37f9ce62 13094 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7995c64e
JP
13095 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13096 board_info[ent->driver_data].name,
13097 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13098 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13099 dev->base_addr, bp->pdev->irq, dev->dev_addr);
c016201c 13100
a2fbb9ea 13101 return 0;
34f80b04
EG
13102
13103init_one_exit:
13104 if (bp->regview)
13105 iounmap(bp->regview);
13106
13107 if (bp->doorbells)
13108 iounmap(bp->doorbells);
13109
13110 free_netdev(dev);
13111
13112 if (atomic_read(&pdev->enable_cnt) == 1)
13113 pci_release_regions(pdev);
13114
13115 pci_disable_device(pdev);
13116 pci_set_drvdata(pdev, NULL);
13117
13118 return rc;
a2fbb9ea
ET
13119}
13120
13121static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13122{
13123 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13124 struct bnx2x *bp;
13125
13126 if (!dev) {
7995c64e 13127 pr_err("BAD net device from bnx2x_init_one\n");
228241eb
ET
13128 return;
13129 }
228241eb 13130 bp = netdev_priv(dev);
a2fbb9ea 13131
a2fbb9ea
ET
13132 unregister_netdev(dev);
13133
72fd0718
VZ
13134 /* Make sure RESET task is not scheduled before continuing */
13135 cancel_delayed_work_sync(&bp->reset_task);
13136
94a78b79
VZ
13137 kfree(bp->init_ops_offsets);
13138 kfree(bp->init_ops);
13139 kfree(bp->init_data);
13140 release_firmware(bp->firmware);
13141
a2fbb9ea
ET
13142 if (bp->regview)
13143 iounmap(bp->regview);
13144
13145 if (bp->doorbells)
13146 iounmap(bp->doorbells);
13147
13148 free_netdev(dev);
34f80b04
EG
13149
13150 if (atomic_read(&pdev->enable_cnt) == 1)
13151 pci_release_regions(pdev);
13152
a2fbb9ea
ET
13153 pci_disable_device(pdev);
13154 pci_set_drvdata(pdev, NULL);
13155}
13156
13157static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13158{
13159 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
13160 struct bnx2x *bp;
13161
34f80b04 13162 if (!dev) {
7995c64e 13163 pr_err("BAD net device from bnx2x_init_one\n");
34f80b04
EG
13164 return -ENODEV;
13165 }
13166 bp = netdev_priv(dev);
a2fbb9ea 13167
34f80b04 13168 rtnl_lock();
a2fbb9ea 13169
34f80b04 13170 pci_save_state(pdev);
228241eb 13171
34f80b04
EG
13172 if (!netif_running(dev)) {
13173 rtnl_unlock();
13174 return 0;
13175 }
a2fbb9ea
ET
13176
13177 netif_device_detach(dev);
a2fbb9ea 13178
da5a662a 13179 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 13180
a2fbb9ea 13181 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 13182
34f80b04
EG
13183 rtnl_unlock();
13184
a2fbb9ea
ET
13185 return 0;
13186}
13187
13188static int bnx2x_resume(struct pci_dev *pdev)
13189{
13190 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 13191 struct bnx2x *bp;
a2fbb9ea
ET
13192 int rc;
13193
228241eb 13194 if (!dev) {
7995c64e 13195 pr_err("BAD net device from bnx2x_init_one\n");
228241eb
ET
13196 return -ENODEV;
13197 }
228241eb 13198 bp = netdev_priv(dev);
a2fbb9ea 13199
72fd0718
VZ
13200 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13201 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13202 return -EAGAIN;
13203 }
13204
34f80b04
EG
13205 rtnl_lock();
13206
228241eb 13207 pci_restore_state(pdev);
34f80b04
EG
13208
13209 if (!netif_running(dev)) {
13210 rtnl_unlock();
13211 return 0;
13212 }
13213
a2fbb9ea
ET
13214 bnx2x_set_power_state(bp, PCI_D0);
13215 netif_device_attach(dev);
13216
da5a662a 13217 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 13218
34f80b04
EG
13219 rtnl_unlock();
13220
13221 return rc;
a2fbb9ea
ET
13222}
13223
f8ef6e44
YG
13224static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13225{
13226 int i;
13227
13228 bp->state = BNX2X_STATE_ERROR;
13229
13230 bp->rx_mode = BNX2X_RX_MODE_NONE;
13231
13232 bnx2x_netif_stop(bp, 0);
13233
13234 del_timer_sync(&bp->timer);
13235 bp->stats_state = STATS_STATE_DISABLED;
13236 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13237
13238 /* Release IRQs */
6cbe5065 13239 bnx2x_free_irq(bp, false);
f8ef6e44
YG
13240
13241 if (CHIP_IS_E1(bp)) {
13242 struct mac_configuration_cmd *config =
13243 bnx2x_sp(bp, mcast_config);
13244
8d9c5f34 13245 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
13246 CAM_INVALIDATE(config->config_table[i]);
13247 }
13248
13249 /* Free SKBs, SGEs, TPA pool and driver internals */
13250 bnx2x_free_skbs(bp);
54b9ddaa 13251 for_each_queue(bp, i)
f8ef6e44 13252 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
54b9ddaa 13253 for_each_queue(bp, i)
7cde1c8b 13254 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
13255 bnx2x_free_mem(bp);
13256
13257 bp->state = BNX2X_STATE_CLOSED;
13258
13259 netif_carrier_off(bp->dev);
13260
13261 return 0;
13262}
13263
13264static void bnx2x_eeh_recover(struct bnx2x *bp)
13265{
13266 u32 val;
13267
13268 mutex_init(&bp->port.phy_mutex);
13269
13270 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13271 bp->link_params.shmem_base = bp->common.shmem_base;
13272 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13273
13274 if (!bp->common.shmem_base ||
13275 (bp->common.shmem_base < 0xA0000) ||
13276 (bp->common.shmem_base >= 0xC0000)) {
13277 BNX2X_DEV_INFO("MCP not active\n");
13278 bp->flags |= NO_MCP_FLAG;
13279 return;
13280 }
13281
13282 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13283 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13284 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13285 BNX2X_ERR("BAD MCP validity signature\n");
13286
13287 if (!BP_NOMCP(bp)) {
13288 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13289 & DRV_MSG_SEQ_NUMBER_MASK);
13290 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13291 }
13292}
13293
493adb1f
WX
13294/**
13295 * bnx2x_io_error_detected - called when PCI error is detected
13296 * @pdev: Pointer to PCI device
13297 * @state: The current pci connection state
13298 *
13299 * This function is called after a PCI bus error affecting
13300 * this device has been detected.
13301 */
13302static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13303 pci_channel_state_t state)
13304{
13305 struct net_device *dev = pci_get_drvdata(pdev);
13306 struct bnx2x *bp = netdev_priv(dev);
13307
13308 rtnl_lock();
13309
13310 netif_device_detach(dev);
13311
07ce50e4
DN
13312 if (state == pci_channel_io_perm_failure) {
13313 rtnl_unlock();
13314 return PCI_ERS_RESULT_DISCONNECT;
13315 }
13316
493adb1f 13317 if (netif_running(dev))
f8ef6e44 13318 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
13319
13320 pci_disable_device(pdev);
13321
13322 rtnl_unlock();
13323
13324 /* Request a slot reset */
13325 return PCI_ERS_RESULT_NEED_RESET;
13326}
13327
13328/**
13329 * bnx2x_io_slot_reset - called after the PCI bus has been reset
13330 * @pdev: Pointer to PCI device
13331 *
13332 * Restart the card from scratch, as if from a cold-boot.
13333 */
13334static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13335{
13336 struct net_device *dev = pci_get_drvdata(pdev);
13337 struct bnx2x *bp = netdev_priv(dev);
13338
13339 rtnl_lock();
13340
13341 if (pci_enable_device(pdev)) {
13342 dev_err(&pdev->dev,
13343 "Cannot re-enable PCI device after reset\n");
13344 rtnl_unlock();
13345 return PCI_ERS_RESULT_DISCONNECT;
13346 }
13347
13348 pci_set_master(pdev);
13349 pci_restore_state(pdev);
13350
13351 if (netif_running(dev))
13352 bnx2x_set_power_state(bp, PCI_D0);
13353
13354 rtnl_unlock();
13355
13356 return PCI_ERS_RESULT_RECOVERED;
13357}
13358
13359/**
13360 * bnx2x_io_resume - called when traffic can start flowing again
13361 * @pdev: Pointer to PCI device
13362 *
13363 * This callback is called when the error recovery driver tells us that
13364 * its OK to resume normal operation.
13365 */
13366static void bnx2x_io_resume(struct pci_dev *pdev)
13367{
13368 struct net_device *dev = pci_get_drvdata(pdev);
13369 struct bnx2x *bp = netdev_priv(dev);
13370
72fd0718
VZ
13371 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13372 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13373 return;
13374 }
13375
493adb1f
WX
13376 rtnl_lock();
13377
f8ef6e44
YG
13378 bnx2x_eeh_recover(bp);
13379
493adb1f 13380 if (netif_running(dev))
f8ef6e44 13381 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
13382
13383 netif_device_attach(dev);
13384
13385 rtnl_unlock();
13386}
13387
13388static struct pci_error_handlers bnx2x_err_handler = {
13389 .error_detected = bnx2x_io_error_detected,
356e2385
EG
13390 .slot_reset = bnx2x_io_slot_reset,
13391 .resume = bnx2x_io_resume,
493adb1f
WX
13392};
13393
a2fbb9ea 13394static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
13395 .name = DRV_MODULE_NAME,
13396 .id_table = bnx2x_pci_tbl,
13397 .probe = bnx2x_init_one,
13398 .remove = __devexit_p(bnx2x_remove_one),
13399 .suspend = bnx2x_suspend,
13400 .resume = bnx2x_resume,
13401 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
13402};
13403
13404static int __init bnx2x_init(void)
13405{
dd21ca6d
SG
13406 int ret;
13407
7995c64e 13408 pr_info("%s", version);
938cf541 13409
1cf167f2
EG
13410 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13411 if (bnx2x_wq == NULL) {
7995c64e 13412 pr_err("Cannot create workqueue\n");
1cf167f2
EG
13413 return -ENOMEM;
13414 }
13415
dd21ca6d
SG
13416 ret = pci_register_driver(&bnx2x_pci_driver);
13417 if (ret) {
7995c64e 13418 pr_err("Cannot register driver\n");
dd21ca6d
SG
13419 destroy_workqueue(bnx2x_wq);
13420 }
13421 return ret;
a2fbb9ea
ET
13422}
13423
13424static void __exit bnx2x_cleanup(void)
13425{
13426 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
13427
13428 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
13429}
13430
13431module_init(bnx2x_init);
13432module_exit(bnx2x_cleanup);
13433
993ac7b5
MC
13434#ifdef BCM_CNIC
13435
13436/* count denotes the number of new completions we have seen */
13437static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13438{
13439 struct eth_spe *spe;
13440
13441#ifdef BNX2X_STOP_ON_ERROR
13442 if (unlikely(bp->panic))
13443 return;
13444#endif
13445
13446 spin_lock_bh(&bp->spq_lock);
13447 bp->cnic_spq_pending -= count;
13448
13449 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13450 bp->cnic_spq_pending++) {
13451
13452 if (!bp->cnic_kwq_pending)
13453 break;
13454
13455 spe = bnx2x_sp_get_next(bp);
13456 *spe = *bp->cnic_kwq_cons;
13457
13458 bp->cnic_kwq_pending--;
13459
13460 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13461 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13462
13463 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13464 bp->cnic_kwq_cons = bp->cnic_kwq;
13465 else
13466 bp->cnic_kwq_cons++;
13467 }
13468 bnx2x_sp_prod_update(bp);
13469 spin_unlock_bh(&bp->spq_lock);
13470}
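#if 0	/* Illustrative sketch, not compiled: the KWQ consumer advance
	 * used above is the usual bounded-ring walk - wrap back to the
	 * ring base once the cursor reaches the last element.
	 */
static struct eth_spe *cnic_kwq_cons_advance(struct bnx2x *bp)
{
	struct eth_spe *spe = bp->cnic_kwq_cons;

	if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
		bp->cnic_kwq_cons = bp->cnic_kwq;	/* wrap */
	else
		bp->cnic_kwq_cons++;
	return spe;
}
#endif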
13471
13472static int bnx2x_cnic_sp_queue(struct net_device *dev,
13473 struct kwqe_16 *kwqes[], u32 count)
13474{
13475 struct bnx2x *bp = netdev_priv(dev);
13476 int i;
13477
13478#ifdef BNX2X_STOP_ON_ERROR
13479 if (unlikely(bp->panic))
13480 return -EIO;
13481#endif
13482
13483 spin_lock_bh(&bp->spq_lock);
13484
13485 for (i = 0; i < count; i++) {
13486 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13487
13488 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13489 break;
13490
13491 *bp->cnic_kwq_prod = *spe;
13492
13493 bp->cnic_kwq_pending++;
13494
13495 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13496 spe->hdr.conn_and_cmd_data, spe->hdr.type,
13497 spe->data.mac_config_addr.hi,
13498 spe->data.mac_config_addr.lo,
13499 bp->cnic_kwq_pending);
13500
13501 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13502 bp->cnic_kwq_prod = bp->cnic_kwq;
13503 else
13504 bp->cnic_kwq_prod++;
13505 }
13506
13507 spin_unlock_bh(&bp->spq_lock);
13508
13509 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13510 bnx2x_cnic_sp_post(bp, 0);
13511
13512 return i;
13513}
13514
13515static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13516{
13517 struct cnic_ops *c_ops;
13518 int rc = 0;
13519
13520 mutex_lock(&bp->cnic_mutex);
13521 c_ops = bp->cnic_ops;
13522 if (c_ops)
13523 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13524 mutex_unlock(&bp->cnic_mutex);
13525
13526 return rc;
13527}
13528
13529static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13530{
13531 struct cnic_ops *c_ops;
13532 int rc = 0;
13533
13534 rcu_read_lock();
13535 c_ops = rcu_dereference(bp->cnic_ops);
13536 if (c_ops)
13537 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13538 rcu_read_unlock();
13539
13540 return rc;
13541}
13542
13543/*
13544 * for commands that have no data
13545 */
13546static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13547{
13548 struct cnic_ctl_info ctl = {0};
13549
13550 ctl.cmd = cmd;
13551
13552 return bnx2x_cnic_ctl_send(bp, &ctl);
13553}
13554
13555static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13556{
13557 struct cnic_ctl_info ctl;
13558
13559 /* first we tell CNIC and only then we count this as a completion */
13560 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13561 ctl.data.comp.cid = cid;
13562
13563 bnx2x_cnic_ctl_send_bh(bp, &ctl);
13564 bnx2x_cnic_sp_post(bp, 1);
13565}
13566
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

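/*
 * Describe the interrupt resources CNIC may use: entry 0 carries the
 * dedicated CNIC status block (driven by MSI-X vector 1 when MSI-X is
 * active), entry 1 the default status block.
 */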
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

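/*
 * Called through cnic_eth_dev when the CNIC module attaches to this
 * device.  Allocates a page-sized kwqe staging ring, initializes the
 * CNIC status block and publishes @ops with rcu_assign_pointer() so
 * the RCU-protected send path sees fully initialized state.
 */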
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

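/*
 * Detach the CNIC module: clear the iSCSI MAC if it was set, unpublish
 * cnic_ops and wait out RCU readers before freeing the kwqe ring.
 */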
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

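/*
 * Fill in and return the cnic_eth_dev structure through which the CNIC
 * module discovers this device's resources and driver entry points.
 */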
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

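/*
 * Illustrative sketch only, not part of the driver: a CNIC-side caller
 * is expected to attach roughly as below, where my_ops and my_data are
 * hypothetical placeholders for the caller's cnic_ops and private
 * context.
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, &my_ops, my_data)) {
 *		cp->drv_submit_kwqes_16(netdev, kwqes, n);
 *		...
 *		cp->drv_unregister_cnic(netdev);
 *	}
 */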
#endif /* BCM_CNIC */