/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

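/* Counterpart of bnx2x_reg_wr_ind(): reads through the same PCI
 * config-space window, then restores the window to the vendor-ID
 * offset so normal config cycles are undisturbed.
 */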
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

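/* DMA a block from host memory (dma_addr) to GRC space (dst_addr).
 * Falls back to indirect writes while the DMAE block is not yet ready,
 * otherwise posts a command and polls the write-back completion word
 * until it reads DMAE_COMP_VAL (or the retry count is exhausted).
 */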
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

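/* Mirror image of bnx2x_write_dmae(): DMA a block from GRC space into
 * the slowpath wb_data buffer, with the same indirect-access fallback
 * and completion polling.
 */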
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

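/* Scan the assert lists of the four storm processors (X/T/C/U) and
 * print every valid entry; each list is walked until an entry with
 * COMMON_ASM_INVALID_ASSERT_OPCODE is hit.  Returns the number of
 * asserts found.
 */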
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

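/* Print the MCP (management CPU) trace buffer kept in scratchpad
 * memory.  The current mark is read first and the two halves of the
 * cyclic buffer are printed in order: from the mark to the end, then
 * from the start of the buffer back up to the mark.
 */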
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

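/* Crash-dump helper: freeze statistics, print the driver's view of all
 * status-block indices, then walk the Rx/Tx rings around the current
 * consumers and finish with the FW trace and storm asserts.
 */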
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

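/* Program the HC configuration for the active interrupt mode (INT#x,
 * MSI or MSI-X) and, on E1H, set up the leading/trailing edge masks
 * that select which attention bits may interrupt this function.
 */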
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

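/* Quiesce interrupts: raise intr_sem so handlers return early,
 * optionally mask the HC, wait for all in-flight ISRs and make sure
 * the slowpath task is not left running.
 */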
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

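/* Write an ACK to the IGU command register: report the last status
 * block index seen for the given storm and optionally re-enable or
 * disable the interrupt for this status block.
 */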
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

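/* Number of Tx BDs still available on the ring; the per-page "next"
 * BDs are counted as used, which keeps a small safety margin.
 */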
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

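/* Tx completion path: advance the consumer to the index reported in
 * the status block, free the completed skbs and, if the netdev queue
 * was stopped, wake it once enough BDs are available again.
 */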
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


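/* Slowpath (ramrod) completion reported through the fastpath RCQ:
 * advance the per-queue or global state machine according to which
 * command completed in which state.
 */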
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

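/* SGE ring helpers: each SGE entry maps a compound page of
 * PAGES_PER_SGE pages that the FW fills with the non-linear part of
 * aggregated (TPA) packets.
 */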
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

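/* The sge_mask bitmap tracks SGE ring entries handed to the FW; bits
 * are cleared as pages are consumed, and once a whole 64-bit mask
 * element is clear the producer may be advanced past it and the
 * element re-armed to all ones.
 */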
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

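/* Start a TPA aggregation: park the partially-filled skb from the Rx
 * ring in the per-queue TPA pool and put the pool's spare skb on the
 * producer BD so the ring stays fully populated while the FW
 * aggregates.
 */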
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

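/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replenishing each consumed SGE slot on the way; gso_size
 * is set so the stack can re-segment the packet when forwarding.
 */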
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

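/* End a TPA aggregation: fix up the IP checksum of the merged packet,
 * attach the SGE fragments and hand the skb to the stack, then return
 * the bin to the STOP state with a fresh spare skb (or drop the packet
 * if no new skb could be allocated).
 */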
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

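/* Main Rx completion loop (bounded by the NAPI budget): walk the RCQ,
 * dispatch slowpath CQEs and TPA start/stop events; for regular
 * packets either copy small frames into a fresh skb or unmap and pass
 * the buffer up, then publish the new producers to the FW.
 */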
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

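/* Per-queue MSI-X handler: each vector serves either an Rx queue
 * (schedule NAPI) or a Tx queue (complete transmits inline and re-arm
 * the interrupt through the IGU).
 */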
1692static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1693{
1694 struct bnx2x_fastpath *fp = fp_cookie;
1695 struct bnx2x *bp = fp->bp;
a2fbb9ea 1696
da5a662a
VZ
1697 /* Return here if interrupt is disabled */
1698 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1699 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1700 return IRQ_HANDLED;
1701 }
1702
34f80b04 1703 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1704 fp->index, fp->sb_id);
0626b899 1705 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1706
1707#ifdef BNX2X_STOP_ON_ERROR
1708 if (unlikely(bp->panic))
1709 return IRQ_HANDLED;
1710#endif
ca00392c
EG
1711 /* Handle Rx or Tx according to MSI-X vector */
1712 if (fp->is_rx_queue) {
1713 prefetch(fp->rx_cons_sb);
1714 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1715
ca00392c 1716 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1717
ca00392c
EG
1718 } else {
1719 prefetch(fp->tx_cons_sb);
1720 prefetch(&fp->status_blk->c_status_block.status_block_index);
1721
1722 bnx2x_update_fpsb_idx(fp);
1723 rmb();
1724 bnx2x_tx_int(fp);
1725
1726 /* Re-enable interrupts */
1727 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1728 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1729 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1730 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1731 }
34f80b04 1732
a2fbb9ea
ET
1733 return IRQ_HANDLED;
1734}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
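/*
 * Layout of the 'status' word acked above (explanatory comment, not in
 * the original source): bit 0 signals a slowpath event and is handed
 * off to the sp_task workqueue, while each fastpath status block sb_id
 * owns the bit at (0x2 << sb_id). Bits are cleared as they are
 * serviced, so anything left over is flagged as an unknown interrupt.
 */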

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
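/*
 * How the HW lock registers work above (explanatory comment, not in
 * the original source): the per-function driver-control register is a
 * set/status pair - writing the resource bit to offset +4 requests the
 * lock, and reading back the base register tells whether this function
 * now owns it. Releasing is a plain write of the same bit to the base
 * register, as done in bnx2x_release_hw_lock() below.
 */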

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
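/*
 * Worked example of the swap logic above (illustrative, not in the
 * original source): when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero, the boolean AND is 1, so
 * gpio_port = 1 ^ port - i.e. port 0 uses the port-1 GPIO bank (pins
 * shifted by MISC_REGISTERS_GPIO_PORT_SHIFT) and vice versa. With
 * either register zero, gpio_port == port and no shift is applied.
 */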

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
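/*
 * In short (explanatory comment, not in the original source): the IEEE
 * pause-advertisement bits resolved by the link code are translated
 * into the ethtool ADVERTISED_Pause / ADVERTISED_Asym_Pause flags in
 * bp->port.advertising, so the flow-control capabilities reported to
 * user space match what was actually negotiated.
 */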

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
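/*
 * Illustrative numbers for the above (not in the original source): at
 * 10G, line_speed = 10000 Mbps, so r_param = 1250 bytes/usec and
 * t_fair = T_FAIR_COEF / 10000, which matches the "for 10G it is
 * 1000usec" comment; the same formula yields 10000usec at 1G.
 */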

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
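/*
 * Example of the normalization rule above (illustrative values, not in
 * the original source): with per-VN min rates {0, 25, 0, 75}, the zero
 * entries are raised to DEF_MIN_RATE before summing, so every visible
 * VN contributes a non-zero weight. Only the all-zero case disables
 * the fairness algorithm entirely by forcing vn_weight_sum to 0.
 */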
2298
8a1c38d1 2299static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2300{
2301 struct rate_shaping_vars_per_vn m_rs_vn;
2302 struct fairness_vars_per_vn m_fair_vn;
2303 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2304 u16 vn_min_rate, vn_max_rate;
2305 int i;
2306
2307 /* If function is hidden - set min and max to zeroes */
2308 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2309 vn_min_rate = 0;
2310 vn_max_rate = 0;
2311
2312 } else {
2313 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2314 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2315 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2316 if current min rate is zero - set it to 1.
33471629 2317 This is a requirement of the algorithm. */
8a1c38d1 2318 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2319 vn_min_rate = DEF_MIN_RATE;
2320 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2321 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2322 }
2323
8a1c38d1
EG
2324 DP(NETIF_MSG_IFUP,
2325 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2326 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2327
2328 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2329 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2330
2331 /* global vn counter - maximal Mbps for this vn */
2332 m_rs_vn.vn_counter.rate = vn_max_rate;
2333
2334 /* quota - number of bytes transmitted in this period */
2335 m_rs_vn.vn_counter.quota =
2336 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2337
8a1c38d1 2338 if (bp->vn_weight_sum) {
34f80b04
EG
2339 /* credit for each period of the fairness algorithm:
2340 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2341 vn_weight_sum should not be larger than 10000, thus
2342 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2343 than zero */
34f80b04 2344 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2345 max((u32)(vn_min_rate * (T_FAIR_COEF /
2346 (8 * bp->vn_weight_sum))),
2347 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2348 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2349 m_fair_vn.vn_credit_delta);
2350 }
2351
34f80b04
EG
2352 /* Store it to internal memory */
2353 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2354 REG_WR(bp, BAR_XSTRORM_INTMEM +
2355 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2356 ((u32 *)(&m_rs_vn))[i]);
2357
2358 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2359 REG_WR(bp, BAR_XSTRORM_INTMEM +
2360 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2361 ((u32 *)(&m_fair_vn))[i]);
2362}
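/*
 * Quota sanity check (illustrative, not in the original source): with
 * vn_max_rate = 10000 Mbps and RS_PERIODIC_TIMEOUT_USEC = 100, the
 * quota is 10000 * 100 / 8 = 125000 bytes per rate-shaping period,
 * i.e. exactly what a 10G link can move in 100 usec.
 */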

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
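/*
 * How the mailbox handshake above works (explanatory comment, not in
 * the original source): the driver ORs an incrementing sequence number
 * into the command it writes to drv_mb_header, then polls
 * fw_mb_header until the FW echoes the same sequence back.
 * FW_MSG_SEQ_NUMBER_MASK separates the echoed sequence from the
 * response code; a mismatch after the timeout is treated as an FW
 * failure and dumped.
 */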

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_mac_addr_e1h(bp, 0);

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_mac_addr_e1h(bp, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->state = BNX2X_STATE_OPEN;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
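/*
 * Ordering note for the producer update above (explanatory comment,
 * not in the original source): the wmb() guarantees the BD contents
 * reach memory before the producer-index write tells the chip to
 * fetch them, and mmiowb() orders the MMIO write against the
 * spin-unlock so two CPUs cannot present producer updates to the
 * device out of order.
 */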

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2717
4a37fb66
YG
2718/* release split MCP access lock register */
2719static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2720{
2721 u32 val = 0;
2722
2723 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2724}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
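/*
 * The return value above is a bitmap of which default-SB indices
 * moved: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM. bnx2x_sp_task() below only inspects
 * bit 0 to decide whether HW attentions need handling.
 */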

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
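/*
 * Worked example of the borrow path above (illustrative numbers, not
 * in the original source): minuend 0x00000002:00000000, subtrahend
 * 0x00000001:00000001. Here m_lo < s_lo, so d_hi = 2 - 1 = 1; since
 * d_hi > 0 the loan gives d_hi = 0 and
 * d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff, the correct 64-bit
 * difference. Results that would go negative are clamped to zero
 * rather than wrapping.
 */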

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
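/*
 * Note on the #else branch above: on 32-bit builds
 * (BITS_PER_LONG != 64) only the low 32 bits of the counter are
 * returned, since a long cannot hold the full hi:lo pair; on 64-bit
 * builds HILO_U64() combines both halves.
 */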

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3503
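/* Build the PMF's DMAE program for one statistics cycle: write the host
 * port/function stats out to the MCP, then read the hardware counters
 * back from the active MAC (BMAC or EMAC) and from the NIG.  Only the
 * final command completes to stats_comp; the intermediate commands
 * complete to the loader GO register so the chain keeps running.
 */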
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

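/* Non-PMF path: program a single DMAE command that copies this function's
 * host_func_stats block out to the address the MCP handed us in
 * fw_mb_param (bp->func_stx).
 */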
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

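/* MAC statistics update.  The UPDATE_STAT64/UPDATE_EXTEND_STAT macros
 * (defined elsewhere in the driver) fold each hardware counter into a
 * 64-bit hi/lo pair in mac_stx[1]; the local "diff" struct is scratch
 * space those macros use to extend counters across wrap-around.
 */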
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

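/* Fold the freshly DMAE'd MAC and NIG counters into the driver's
 * port/eth stats.  NIG counters are kept as old/new pairs so only the
 * delta since the previous pass is added.
 */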
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

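/* Harvest the per-client statistics the storms wrote into fw_stats.  A
 * snapshot is only trusted if each storm echoes back the counter of the
 * most recent STAT_QUERY ramrod (bp->stats_counter - 1); otherwise an
 * error is returned and we retry on the next tick.
 */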
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

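/* Translate the accumulated 64-bit hi/lo driver statistics into the
 * standard struct net_device_stats counters reported to the stack.
 */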
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu  no_buff_discard %lu "
			"mac_discard %u  mac_filter_discard %u "
			"xxoverflow_discard %u  brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

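/* Statistics state machine: for each (state, event) pair the table below
 * gives the action to run and the next state.  Events are PMF change,
 * link up, timer update and stop; the states are simply DISABLED and
 * ENABLED.
 */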
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

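/* Push the current host_port_stats block out to the MCP once at init
 * time, giving the management firmware a known statistics base to start
 * from.
 */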
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

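/* Periodic driver timer: besides optional polling of ring 0, it feeds
 * the driver-pulse/mcp-pulse heartbeat through the shmem mailbox and
 * drives the statistics state machine with STATS_EVENT_UPDATE.
 */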
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

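/* Status-block setup.  Each fastpath queue gets a host status block in
 * which the chip's storm processors publish consumer indices; the
 * helpers below zero the storm-side copies, register the host buffer
 * addresses and initially disable the per-index host-coalescing entries
 * (bnx2x_update_coalesce() enables them later).
 */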
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

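/* Program interrupt coalescing.  bp->rx_ticks/bp->tx_ticks are in
 * microseconds and the HC timeout registers apparently count in 12-usec
 * hardware units, hence the division by 12; a zero result disables
 * coalescing on that status-block index.
 */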
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

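/* Rx ring setup.  Each Rx queue owns three rings: the BD ring of buffer
 * descriptors, the completion (CQE) ring and the SGE ring used for TPA
 * aggregation.  The last entries of every ring page are "next page"
 * pointers, which is why the loops below stop 1-2 descriptors short of
 * the page end.
 */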
a2fbb9ea
ET
4894static void bnx2x_init_rx_rings(struct bnx2x *bp)
4895{
7a9b2557 4896 int func = BP_FUNC(bp);
32626230
EG
4897 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4898 ETH_MAX_AGGREGATION_QUEUES_E1H;
4899 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4900 int i, j;
a2fbb9ea 4901
87942b46 4902 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4903 DP(NETIF_MSG_IFUP,
4904 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4905
7a9b2557 4906 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4907
555f6c78 4908 for_each_rx_queue(bp, j) {
32626230 4909 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4910
32626230 4911 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4912 fp->tpa_pool[i].skb =
4913 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4914 if (!fp->tpa_pool[i].skb) {
4915 BNX2X_ERR("Failed to allocate TPA "
4916 "skb pool for queue[%d] - "
4917 "disabling TPA on this "
4918 "queue!\n", j);
4919 bnx2x_free_tpa_pool(bp, fp, i);
4920 fp->disable_tpa = 1;
4921 break;
4922 }
4923 pci_unmap_addr_set((struct sw_rx_bd *)
4924 &bp->fp->tpa_pool[i],
4925 mapping, 0);
4926 fp->tpa_state[i] = BNX2X_TPA_STOP;
4927 }
4928 }
4929 }
4930
555f6c78 4931 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4932 struct bnx2x_fastpath *fp = &bp->fp[j];
4933
4934 fp->rx_bd_cons = 0;
4935 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4936 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4937
ca00392c
EG
4938 /* Mark queue as Rx */
4939 fp->is_rx_queue = 1;
4940
7a9b2557
VZ
4941 /* "next page" elements initialization */
4942 /* SGE ring */
4943 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4944 struct eth_rx_sge *sge;
4945
4946 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4947 sge->addr_hi =
4948 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4949 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4950 sge->addr_lo =
4951 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4952 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4953 }
4954
4955 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4956
7a9b2557 4957 /* RX BD ring */
a2fbb9ea
ET
4958 for (i = 1; i <= NUM_RX_RINGS; i++) {
4959 struct eth_rx_bd *rx_bd;
4960
4961 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4962 rx_bd->addr_hi =
4963 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4964 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4965 rx_bd->addr_lo =
4966 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4967 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4968 }
4969
34f80b04 4970 /* CQ ring */
a2fbb9ea
ET
4971 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4972 struct eth_rx_cqe_next_page *nextpg;
4973
4974 nextpg = (struct eth_rx_cqe_next_page *)
4975 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4976 nextpg->addr_hi =
4977 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4978 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4979 nextpg->addr_lo =
4980 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4981 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4982 }
4983
7a9b2557
VZ
4984 /* Allocate SGEs and initialize the ring elements */
4985 for (i = 0, ring_prod = 0;
4986 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4987 
4988 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4989 BNX2X_ERR("was only able to allocate "
4990 "%d rx sges\n", i);
4991 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4992 /* Cleanup already allocated elements */
4993 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4994 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4995 fp->disable_tpa = 1;
4996 ring_prod = 0;
4997 break;
4998 }
4999 ring_prod = NEXT_SGE_IDX(ring_prod);
5000 }
5001 fp->rx_sge_prod = ring_prod;
5002
5003 /* Allocate BDs and initialize BD ring */
5004 fp->rx_comp_cons = 0;
5005 cqe_ring_prod = ring_prod = 0;
5006 for (i = 0; i < bp->rx_ring_size; i++) {
5007 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5008 BNX2X_ERR("was only able to allocate "
5009 "%d rx skbs on queue[%d]\n", i, j);
5010 fp->eth_q_stats.rx_skb_alloc_failed++;
5011 break;
5012 }
5013 ring_prod = NEXT_RX_IDX(ring_prod);
5014 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5015 WARN_ON(ring_prod <= i);
5016 }
5017
5018 fp->rx_bd_prod = ring_prod;
5019 /* must not have more available CQEs than BDs */
5020 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5021 cqe_ring_prod);
5022 fp->rx_pkt = fp->rx_calls = 0;
5023
5024 /* Warning!
5025 * this will generate an interrupt (to the TSTORM)
5026 * must only be done after chip is initialized
5027 */
5028 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5029 fp->rx_sge_prod);
5030 if (j != 0)
5031 continue;
5032
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
5034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5035 U64_LO(fp->rx_comp_mapping));
5036 REG_WR(bp, BAR_USTRORM_INTMEM +
5037 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5038 U64_HI(fp->rx_comp_mapping));
5039 }
5040}
5041
5042static void bnx2x_init_tx_ring(struct bnx2x *bp)
5043{
5044 int i, j;
5045
5046 for_each_tx_queue(bp, j) {
5047 struct bnx2x_fastpath *fp = &bp->fp[j];
5048
5049 for (i = 1; i <= NUM_TX_RINGS; i++) {
5050 struct eth_tx_next_bd *tx_next_bd =
5051 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5052 
5053 tx_next_bd->addr_hi =
5054 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5055 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5056 tx_next_bd->addr_lo =
5057 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5058 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5059 }
5060
5061 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5062 fp->tx_db.data.zero_fill1 = 0;
5063 fp->tx_db.data.prod = 0;
5064
5065 fp->tx_pkt_prod = 0;
5066 fp->tx_pkt_cons = 0;
5067 fp->tx_bd_prod = 0;
5068 fp->tx_bd_cons = 0;
5069 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5070 fp->tx_pkt = 0;
5071 }
5072
5073 /* clean tx statistics */
5074 for_each_rx_queue(bp, i)
5075 bnx2x_fp(bp, i, tx_pkt) = 0;
5076}
5077
5078static void bnx2x_init_sp_ring(struct bnx2x *bp)
5079{
5080 int func = BP_FUNC(bp);
5081
5082 spin_lock_init(&bp->spq_lock);
5083
5084 bp->spq_left = MAX_SPQ_PENDING;
5085 bp->spq_prod_idx = 0;
5086 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5087 bp->spq_prod_bd = bp->spq;
5088 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5089
5090 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5091 U64_LO(bp->spq_mapping));
5092 REG_WR(bp,
5093 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5094 U64_HI(bp->spq_mapping));
5095
5096 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5097 bp->spq_prod_idx);
5098}
5099
5100static void bnx2x_init_context(struct bnx2x *bp)
5101{
5102 int i;
5103
5104 for_each_rx_queue(bp, i) {
5105 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5106 struct bnx2x_fastpath *fp = &bp->fp[i];
5107 u8 cl_id = fp->cl_id;
5108 
5109 context->ustorm_st_context.common.sb_index_numbers =
5110 BNX2X_RX_SB_INDEX_NUM;
5111 context->ustorm_st_context.common.clientId = cl_id;
5112 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5113 context->ustorm_st_context.common.flags =
5114 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5115 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5116 context->ustorm_st_context.common.statistics_counter_id =
5117 cl_id;
5118 context->ustorm_st_context.common.mc_alignment_log_size =
5119 BNX2X_RX_ALIGN_SHIFT;
5120 context->ustorm_st_context.common.bd_buff_size =
5121 bp->rx_buf_size;
5122 context->ustorm_st_context.common.bd_page_base_hi =
5123 U64_HI(fp->rx_desc_mapping);
5124 context->ustorm_st_context.common.bd_page_base_lo =
5125 U64_LO(fp->rx_desc_mapping);
5126 if (!fp->disable_tpa) {
5127 context->ustorm_st_context.common.flags |=
5128 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5129 context->ustorm_st_context.common.sge_buff_size =
5130 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5131 (u32)0xffff);
5132 context->ustorm_st_context.common.sge_page_base_hi =
5133 U64_HI(fp->rx_sge_mapping);
5134 context->ustorm_st_context.common.sge_page_base_lo =
5135 U64_LO(fp->rx_sge_mapping);
5136
5137 context->ustorm_st_context.common.max_sges_for_packet =
5138 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5139 context->ustorm_st_context.common.max_sges_for_packet =
5140 ((context->ustorm_st_context.common.
5141 max_sges_for_packet + PAGES_PER_SGE - 1) &
5142 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5143 }
5144
5145 context->ustorm_ag_context.cdu_usage =
5146 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5147 CDU_REGION_NUMBER_UCM_AG,
5148 ETH_CONNECTION_TYPE);
5149
5150 context->xstorm_ag_context.cdu_reserved =
5151 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5152 CDU_REGION_NUMBER_XCM_AG,
5153 ETH_CONNECTION_TYPE);
5154 }
5155
5156 for_each_tx_queue(bp, i) {
5157 struct bnx2x_fastpath *fp = &bp->fp[i];
5158 struct eth_context *context =
5159 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5160
5161 context->cstorm_st_context.sb_index_number =
5162 C_SB_ETH_TX_CQ_INDEX;
5163 context->cstorm_st_context.status_block_id = fp->sb_id;
5164
5165 context->xstorm_st_context.tx_bd_page_base_hi =
5166 U64_HI(fp->tx_desc_mapping);
5167 context->xstorm_st_context.tx_bd_page_base_lo =
5168 U64_LO(fp->tx_desc_mapping);
5169 context->xstorm_st_context.statistics_data = (fp->cl_id |
5170 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5171 }
5172}
5173
5174static void bnx2x_init_ind_table(struct bnx2x *bp)
5175{
5176 int func = BP_FUNC(bp);
5177 int i;
5178
5179 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5180 return;
5181
5182 DP(NETIF_MSG_IFUP,
5183 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5184 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5185 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5186 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5187 bp->fp->cl_id + (i % bp->num_rx_queues));
5188}
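/* Editor's note (worked example, not in the original file): the loop above
 * round-robins client IDs into the RSS indirection table.  Assuming a
 * 128-entry table (TSTORM_INDIRECTION_TABLE_SIZE), four Rx queues and a
 * leading cl_id of 0, the table becomes
 *	0 1 2 3 0 1 2 3 ...
 * so each Rx queue receives every fourth RSS hash bucket.
 */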
5189
5190static void bnx2x_set_client_config(struct bnx2x *bp)
5191{
5192 struct tstorm_eth_client_config tstorm_client = {0};
5193 int port = BP_PORT(bp);
5194 int i;
5195 
5196 tstorm_client.mtu = bp->dev->mtu;
5197 tstorm_client.config_flags =
5198 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5199 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5200#ifdef BCM_VLAN
5201 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5202 tstorm_client.config_flags |=
5203 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5204 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5205 }
5206#endif
5207
5208 for_each_queue(bp, i) {
5209 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5210
5211 REG_WR(bp, BAR_TSTRORM_INTMEM +
5212 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5213 ((u32 *)&tstorm_client)[0]);
5214 REG_WR(bp, BAR_TSTRORM_INTMEM +
5215 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5216 ((u32 *)&tstorm_client)[1]);
5217 }
5218
5219 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5220 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5221}
5222
5223static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5224{
5225 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5226 int mode = bp->rx_mode;
5227 int mask = (1 << BP_L_ID(bp));
5228 int func = BP_FUNC(bp);
5229 int port = BP_PORT(bp);
5230 int i;
5231 /* All but management unicast packets should pass to the host as well */
5232 u32 llh_mask =
5233 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5234 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5235 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5236 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5237 
5238 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5239
5240 switch (mode) {
5241 case BNX2X_RX_MODE_NONE: /* no Rx */
5242 tstorm_mac_filter.ucast_drop_all = mask;
5243 tstorm_mac_filter.mcast_drop_all = mask;
5244 tstorm_mac_filter.bcast_drop_all = mask;
5245 break;
5246 
5247 case BNX2X_RX_MODE_NORMAL:
5248 tstorm_mac_filter.bcast_accept_all = mask;
5249 break;
5250 
5251 case BNX2X_RX_MODE_ALLMULTI:
5252 tstorm_mac_filter.mcast_accept_all = mask;
5253 tstorm_mac_filter.bcast_accept_all = mask;
5254 break;
5255 
5256 case BNX2X_RX_MODE_PROMISC:
5257 tstorm_mac_filter.ucast_accept_all = mask;
5258 tstorm_mac_filter.mcast_accept_all = mask;
5259 tstorm_mac_filter.bcast_accept_all = mask;
5260 /* pass management unicast packets as well */
5261 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5262 break;
5263 
5264 default:
5265 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5266 break;
5267 }
5268
5269 REG_WR(bp,
5270 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5271 llh_mask);
5272
5273 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5274 REG_WR(bp, BAR_TSTRORM_INTMEM +
5275 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5276 ((u32 *)&tstorm_mac_filter)[i]);
5277
5278/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5279 ((u32 *)&tstorm_mac_filter)[i]); */
5280 }
5281 
5282 if (mode != BNX2X_RX_MODE_NONE)
5283 bnx2x_set_client_config(bp);
5284}
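/* Editor's note (summary, not in the original file): the switch above
 * expands the four rx modes into storm filter bits roughly as
 *
 *	mode		ucast		mcast		bcast
 *	NONE		drop-all	drop-all	drop-all
 *	NORMAL		CAM match	CAM match	accept-all
 *	ALLMULTI	CAM match	accept-all	accept-all
 *	PROMISC		accept-all	accept-all	accept-all
 *
 * where "CAM match" means no accept/drop-all bit is set and filtering is
 * left to the configured MAC CAM entries (an assumption based on the
 * surrounding driver code, not spelled out here).
 */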
5285
5286static void bnx2x_init_internal_common(struct bnx2x *bp)
5287{
5288 int i;
5289
5290 /* Zero this manually as its initialization is
5291 currently missing in the initTool */
5292 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5293 REG_WR(bp, BAR_USTRORM_INTMEM +
5294 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5295}
5296
5297static void bnx2x_init_internal_port(struct bnx2x *bp)
5298{
5299 int port = BP_PORT(bp);
5300
5301 REG_WR(bp,
5302 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5303 REG_WR(bp,
5304 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5305 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5306 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5307}
5308
5309static void bnx2x_init_internal_func(struct bnx2x *bp)
5310{
5311 struct tstorm_eth_function_common_config tstorm_config = {0};
5312 struct stats_indication_flags stats_flags = {0};
5313 int port = BP_PORT(bp);
5314 int func = BP_FUNC(bp);
5315 int i, j;
5316 u32 offset;
5317 u16 max_agg_size;
5318
5319 if (is_multi(bp)) {
5320 tstorm_config.config_flags = MULTI_FLAGS(bp);
5321 tstorm_config.rss_result_mask = MULTI_MASK;
5322 }
5323
5324 /* Enable TPA if needed */
5325 if (bp->flags & TPA_ENABLE_FLAG)
5326 tstorm_config.config_flags |=
5327 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5328
5329 if (IS_E1HMF(bp))
5330 tstorm_config.config_flags |=
5331 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5332 
5333 tstorm_config.leading_client_id = BP_L_ID(bp);
5334
5335 REG_WR(bp, BAR_TSTRORM_INTMEM +
5336 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5337 (*(u32 *)&tstorm_config));
5338
5339 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5340 bnx2x_set_storm_rx_mode(bp);
5341
5342 for_each_queue(bp, i) {
5343 u8 cl_id = bp->fp[i].cl_id;
5344
5345 /* reset xstorm per client statistics */
5346 offset = BAR_XSTRORM_INTMEM +
5347 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5348 for (j = 0;
5349 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5350 REG_WR(bp, offset + j*4, 0);
5351
5352 /* reset tstorm per client statistics */
5353 offset = BAR_TSTRORM_INTMEM +
5354 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5355 for (j = 0;
5356 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5357 REG_WR(bp, offset + j*4, 0);
5358
5359 /* reset ustorm per client statistics */
5360 offset = BAR_USTRORM_INTMEM +
5361 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5362 for (j = 0;
5363 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5364 REG_WR(bp, offset + j*4, 0);
5365 }
5366
5367 /* Init statistics related context */
5368 stats_flags.collect_eth = 1;
5369 
5370 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5371 ((u32 *)&stats_flags)[0]);
5372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5373 ((u32 *)&stats_flags)[1]);
5374
5375 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5376 ((u32 *)&stats_flags)[0]);
5377 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5378 ((u32 *)&stats_flags)[1]);
5379
5380 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5381 ((u32 *)&stats_flags)[0]);
5382 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5383 ((u32 *)&stats_flags)[1]);
5384
5385 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5386 ((u32 *)&stats_flags)[0]);
5387 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5388 ((u32 *)&stats_flags)[1]);
5389
5390 REG_WR(bp, BAR_XSTRORM_INTMEM +
5391 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5392 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5393 REG_WR(bp, BAR_XSTRORM_INTMEM +
5394 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5395 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5396
5397 REG_WR(bp, BAR_TSTRORM_INTMEM +
5398 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5399 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5400 REG_WR(bp, BAR_TSTRORM_INTMEM +
5401 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5402 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5403 
5404 REG_WR(bp, BAR_USTRORM_INTMEM +
5405 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5406 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5407 REG_WR(bp, BAR_USTRORM_INTMEM +
5408 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5409 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5410
5411 if (CHIP_IS_E1H(bp)) {
5412 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5413 IS_E1HMF(bp));
5414 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5415 IS_E1HMF(bp));
5416 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5417 IS_E1HMF(bp));
5418 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5419 IS_E1HMF(bp));
5420
5421 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5422 bp->e1hov);
5423 }
5424
5425 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5426 max_agg_size =
5427 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5428 SGE_PAGE_SIZE * PAGES_PER_SGE),
5429 (u32)0xffff);
5430 for_each_rx_queue(bp, i) {
5431 struct bnx2x_fastpath *fp = &bp->fp[i];
5432
5433 REG_WR(bp, BAR_USTRORM_INTMEM +
5434 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5435 U64_LO(fp->rx_comp_mapping));
5436 REG_WR(bp, BAR_USTRORM_INTMEM +
5437 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5438 U64_HI(fp->rx_comp_mapping));
5439
5440 /* Next page */
5441 REG_WR(bp, BAR_USTRORM_INTMEM +
5442 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5443 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5444 REG_WR(bp, BAR_USTRORM_INTMEM +
5445 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5446 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5447
5448 REG_WR16(bp, BAR_USTRORM_INTMEM +
5449 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5450 max_agg_size);
5451 }
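/* Editor's note (worked example, values assumed, not in the original
 * file): with 4 KiB pages and PAGES_PER_SGE == 1, max_agg_size above
 * evaluates to
 *	min(min(8, MAX_SKB_FRAGS) * 4096 * 1, 0xffff) = 32768
 * i.e. a TPA aggregation may span at most 8 SGE pages; the 0xffff clamp
 * only bites on configurations with larger or multiple pages per SGE.
 */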
5452 
5453 /* dropless flow control */
5454 if (CHIP_IS_E1H(bp)) {
5455 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5456
5457 rx_pause.bd_thr_low = 250;
5458 rx_pause.cqe_thr_low = 250;
5459 rx_pause.cos = 1;
5460 rx_pause.sge_thr_low = 0;
5461 rx_pause.bd_thr_high = 350;
5462 rx_pause.cqe_thr_high = 350;
5463 rx_pause.sge_thr_high = 0;
5464
5465 for_each_rx_queue(bp, i) {
5466 struct bnx2x_fastpath *fp = &bp->fp[i];
5467
5468 if (!fp->disable_tpa) {
5469 rx_pause.sge_thr_low = 150;
5470 rx_pause.sge_thr_high = 250;
5471 }
5472
5473
5474 offset = BAR_USTRORM_INTMEM +
5475 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5476 fp->cl_id);
5477 for (j = 0;
5478 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5479 j++)
5480 REG_WR(bp, offset + j*4,
5481 ((u32 *)&rx_pause)[j]);
5482 }
5483 }
5484
5485 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5486
5487 /* Init rate shaping and fairness contexts */
5488 if (IS_E1HMF(bp)) {
5489 int vn;
5490
5491 /* During init there is no active link
5492 Until link is up, set link rate to 10Gbps */
5493 bp->link_vars.line_speed = SPEED_10000;
5494 bnx2x_init_port_minmax(bp);
5495
5496 bnx2x_calc_vn_weight_sum(bp);
5497
5498 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5499 bnx2x_init_vn_minmax(bp, 2*vn + port);
5500
5501 /* Enable rate shaping and fairness */
5502 bp->cmng.flags.cmng_enables =
5503 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5504 if (bp->vn_weight_sum)
5505 bp->cmng.flags.cmng_enables |=
5506 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5507 else
5508 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5509 " fairness will be disabled\n");
5510 } else {
5511 /* rate shaping and fairness are disabled */
5512 DP(NETIF_MSG_IFUP,
5513 "single function mode minmax will be disabled\n");
5514 }
5515
5516
5517 /* Store it to internal memory */
5518 if (bp->port.pmf)
5519 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5520 REG_WR(bp, BAR_XSTRORM_INTMEM +
5521 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5522 ((u32 *)(&bp->cmng))[i]);
5523}
5524
5525static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5526{
5527 switch (load_code) {
5528 case FW_MSG_CODE_DRV_LOAD_COMMON:
5529 bnx2x_init_internal_common(bp);
5530 /* no break */
5531
5532 case FW_MSG_CODE_DRV_LOAD_PORT:
5533 bnx2x_init_internal_port(bp);
5534 /* no break */
5535
5536 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5537 bnx2x_init_internal_func(bp);
5538 break;
5539
5540 default:
5541 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5542 break;
5543 }
5544}
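/* Editor's note (not in the original file): the deliberate fall-through
 * in the switch above gives a cumulative init --
 *	DRV_LOAD_COMMON:   common + port + function
 *	DRV_LOAD_PORT:     port + function
 *	DRV_LOAD_FUNCTION: function only
 * matching how much of the chip the MCP says this driver instance owns.
 */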
5545
5546static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5547{
5548 int i;
5549
5550 for_each_queue(bp, i) {
5551 struct bnx2x_fastpath *fp = &bp->fp[i];
5552
5553 fp->bp = bp;
5554 fp->state = BNX2X_FP_STATE_CLOSED;
5555 fp->index = i;
5556 fp->cl_id = BP_L_ID(bp) + i;
5557 fp->sb_id = fp->cl_id;
5558 /* Suitable Rx and Tx SBs are served by the same client */
5559 if (i >= bp->num_rx_queues)
5560 fp->cl_id -= bp->num_rx_queues;
5561 DP(NETIF_MSG_IFUP,
5562 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5563 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5564 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5565 fp->sb_id);
5566 bnx2x_update_fpsb_idx(fp);
5567 }
5568
5569 /* ensure status block indices were read */
5570 rmb();
5571
5572
5573 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5574 DEF_SB_ID);
5575 bnx2x_update_dsb_idx(bp);
5576 bnx2x_update_coalesce(bp);
5577 bnx2x_init_rx_rings(bp);
5578 bnx2x_init_tx_ring(bp);
5579 bnx2x_init_sp_ring(bp);
5580 bnx2x_init_context(bp);
5581 bnx2x_init_internal(bp, load_code);
5582 bnx2x_init_ind_table(bp);
5583 bnx2x_stats_init(bp);
5584
5585 /* At this point, we are ready for interrupts */
5586 atomic_set(&bp->intr_sem, 0);
5587
5588 /* flush all before enabling interrupts */
5589 mb();
5590 mmiowb();
5591
5592 bnx2x_int_enable(bp);
5593
5594 /* Check for SPIO5 */
5595 bnx2x_attn_int_deasserted0(bp,
5596 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5597 AEU_INPUTS_ATTN_BITS_SPIO5);
5598}
5599
5600/* end of nic init */
5601
5602/*
5603 * gzip service functions
5604 */
5605
5606static int bnx2x_gunzip_init(struct bnx2x *bp)
5607{
5608 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5609 &bp->gunzip_mapping);
5610 if (bp->gunzip_buf == NULL)
5611 goto gunzip_nomem1;
5612
5613 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5614 if (bp->strm == NULL)
5615 goto gunzip_nomem2;
5616
5617 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5618 GFP_KERNEL);
5619 if (bp->strm->workspace == NULL)
5620 goto gunzip_nomem3;
5621
5622 return 0;
5623
5624gunzip_nomem3:
5625 kfree(bp->strm);
5626 bp->strm = NULL;
5627
5628gunzip_nomem2:
5629 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5630 bp->gunzip_mapping);
5631 bp->gunzip_buf = NULL;
5632
5633gunzip_nomem1:
5634 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5635 " un-compression\n", bp->dev->name);
5636 return -ENOMEM;
5637}
5638
5639static void bnx2x_gunzip_end(struct bnx2x *bp)
5640{
5641 kfree(bp->strm->workspace);
5642
5643 kfree(bp->strm);
5644 bp->strm = NULL;
5645
5646 if (bp->gunzip_buf) {
5647 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5648 bp->gunzip_mapping);
5649 bp->gunzip_buf = NULL;
5650 }
5651}
5652
5653static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5654{
5655 int n, rc;
5656
5657 /* check gzip header */
5658 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5659 BNX2X_ERR("Bad gzip header\n");
5660 return -EINVAL;
5661 }
5662
5663 n = 10;
5664
5665#define FNAME 0x8
5666
5667 if (zbuf[3] & FNAME)
5668 while ((zbuf[n++] != 0) && (n < len));
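 /* Editor's note (RFC 1952 recap, not in the original file): a gzip
  * stream starts with a 10-byte fixed header -- ID1=0x1f ID2=0x8b
  * CM=8 (deflate) FLG MTIME(4) XFL OS -- hence n = 10 above; when
  * FLG.FNAME (0x8) is set, a NUL-terminated file name follows and the
  * loop above skips it before handing the raw deflate data to zlib.
  */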
5669
5670 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5671 bp->strm->avail_in = len - n;
5672 bp->strm->next_out = bp->gunzip_buf;
5673 bp->strm->avail_out = FW_BUF_SIZE;
5674
5675 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5676 if (rc != Z_OK)
5677 return rc;
5678
5679 rc = zlib_inflate(bp->strm, Z_FINISH);
5680 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5681 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5682 bp->dev->name, bp->strm->msg);
5683
5684 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5685 if (bp->gunzip_outlen & 0x3)
5686 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5687 " gunzip_outlen (%d) not aligned\n",
5688 bp->dev->name, bp->gunzip_outlen);
5689 bp->gunzip_outlen >>= 2;
5690
5691 zlib_inflateEnd(bp->strm);
5692
5693 if (rc == Z_STREAM_END)
5694 return 0;
5695
5696 return rc;
5697}
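/* Editor's note (illustrative sketch, not in the original file): the
 * three gunzip helpers bracket firmware loading --
 *
 *	if (bnx2x_gunzip_init(bp))		(DMA buffer + zlib stream)
 *		return -ENOMEM;
 *	rc = bnx2x_gunzip(bp, zbuf, len);	(inflate into gunzip_buf)
 *	...					(consume gunzip_outlen words)
 *	bnx2x_gunzip_end(bp);			(free stream and buffer)
 *
 * bp->gunzip_outlen is in 32-bit words after the >>= 2 above.
 */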
5698
5699/* nic load/unload */
5700
5701/*
5702 * General service functions
5703 */
5704
5705/* send a NIG loopback debug packet */
5706static void bnx2x_lb_pckt(struct bnx2x *bp)
5707{
5708 u32 wb_write[3];
5709
5710 /* Ethernet source and destination addresses */
5711 wb_write[0] = 0x55555555;
5712 wb_write[1] = 0x55555555;
5713 wb_write[2] = 0x20; /* SOP */
5714 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5715
5716 /* NON-IP protocol */
5717 wb_write[0] = 0x09000000;
5718 wb_write[1] = 0x55555555;
5719 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5720 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5721}
5722
5723/* some of the internal memories
5724 * are not directly readable from the driver
5725 * to test them we send debug packets
5726 */
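/* Editor's note (outline of the test below, not in the original file):
 * part1 starves the parser of CFC search credits, sends one loopback
 * packet and checks that the NIG byte counter reads 0x10 and the PRS
 * holds exactly one packet; part2 repeats with 10 packets (0xb0 bytes),
 * restores one credit and verifies the backlog drains.  Any mismatch
 * fails the self test with a negative return code.
 */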
5727static int bnx2x_int_mem_test(struct bnx2x *bp)
5728{
5729 int factor;
5730 int count, i;
5731 u32 val = 0;
5732
5733 if (CHIP_REV_IS_FPGA(bp))
5734 factor = 120;
5735 else if (CHIP_REV_IS_EMUL(bp))
5736 factor = 200;
5737 else
5738 factor = 1;
5739
5740 DP(NETIF_MSG_HW, "start part1\n");
5741
5742 /* Disable inputs of parser neighbor blocks */
5743 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5744 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5745 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5746 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5747
5748 /* Write 0 to parser credits for CFC search request */
5749 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5750
5751 /* send Ethernet packet */
5752 bnx2x_lb_pckt(bp);
5753
5754 /* TODO: do I reset the NIG statistics? */
5755 /* Wait until NIG register shows 1 packet of size 0x10 */
5756 count = 1000 * factor;
5757 while (count) {
5758 
5759 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5760 val = *bnx2x_sp(bp, wb_data[0]);
5761 if (val == 0x10)
5762 break;
5763
5764 msleep(10);
5765 count--;
5766 }
5767 if (val != 0x10) {
5768 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5769 return -1;
5770 }
5771
5772 /* Wait until PRS register shows 1 packet */
5773 count = 1000 * factor;
5774 while (count) {
5775 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5776 if (val == 1)
5777 break;
5778
5779 msleep(10);
5780 count--;
5781 }
5782 if (val != 0x1) {
5783 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5784 return -2;
5785 }
5786
5787 /* Reset and init BRB, PRS */
5788 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5789 msleep(50);
5790 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5791 msleep(50);
5792 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5793 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5794
5795 DP(NETIF_MSG_HW, "part2\n");
5796
5797 /* Disable inputs of parser neighbor blocks */
5798 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5799 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5800 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5801 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5802
5803 /* Write 0 to parser credits for CFC search request */
5804 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5805
5806 /* send 10 Ethernet packets */
5807 for (i = 0; i < 10; i++)
5808 bnx2x_lb_pckt(bp);
5809
5810 /* Wait until NIG register shows 10 + 1
5811 packets of size 11*0x10 = 0xb0 */
5812 count = 1000 * factor;
5813 while (count) {
5814 
5815 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5816 val = *bnx2x_sp(bp, wb_data[0]);
5817 if (val == 0xb0)
5818 break;
5819
5820 msleep(10);
5821 count--;
5822 }
5823 if (val != 0xb0) {
5824 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5825 return -3;
5826 }
5827
5828 /* Wait until PRS register shows 2 packets */
5829 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5830 if (val != 2)
5831 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5832
5833 /* Write 1 to parser credits for CFC search request */
5834 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5835
5836 /* Wait until PRS register shows 3 packets */
5837 msleep(10 * factor);
5838 /* Wait until NIG register shows 1 packet of size 0x10 */
5839 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5840 if (val != 3)
5841 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5842
5843 /* clear NIG EOP FIFO */
5844 for (i = 0; i < 11; i++)
5845 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5846 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5847 if (val != 1) {
5848 BNX2X_ERR("clear of NIG failed\n");
5849 return -4;
5850 }
5851
5852 /* Reset and init BRB, PRS, NIG */
5853 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5854 msleep(50);
5855 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5856 msleep(50);
5857 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5858 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5859#ifndef BCM_ISCSI
5860 /* set NIC mode */
5861 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5862#endif
5863
5864 /* Enable inputs of parser neighbor blocks */
5865 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5866 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5867 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5868 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5869
5870 DP(NETIF_MSG_HW, "done\n");
5871
5872 return 0; /* OK */
5873}
5874
5875static void enable_blocks_attention(struct bnx2x *bp)
5876{
5877 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5878 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5879 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5880 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5881 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5882 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5883 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5884 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5885 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5886/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5887/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5888 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5889 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5890 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5891/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5892/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5893 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5894 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5895 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5896 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5897/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5898/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5899 if (CHIP_REV_IS_FPGA(bp))
5900 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5901 else
5902 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5903 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5904 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5905 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5906/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5907/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5908 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5909 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5910/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5911 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5912}
5913
5914 
5915static void bnx2x_reset_common(struct bnx2x *bp)
5916{
5917 /* reset_common */
5918 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5919 0xd3ffff7f);
5920 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5921}
5922
5923
5924static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5925{
5926 u32 val;
5927 u8 port;
5928 u8 is_required = 0;
5929
5930 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5931 SHARED_HW_CFG_FAN_FAILURE_MASK;
5932
5933 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5934 is_required = 1;
5935
5936 /*
5937 * The fan failure mechanism is usually related to the PHY type since
5938 * the power consumption of the board is affected by the PHY. Currently,
5939 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5940 */
5941 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5942 for (port = PORT_0; port < PORT_MAX; port++) {
5943 u32 phy_type =
5944 SHMEM_RD(bp, dev_info.port_hw_config[port].
5945 external_phy_config) &
5946 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5947 is_required |=
5948 ((phy_type ==
5949 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5950 (phy_type ==
5951 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5952 (phy_type ==
5953 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5954 }
5955
5956 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5957
5958 if (is_required == 0)
5959 return;
5960
5961 /* Fan failure is indicated by SPIO 5 */
5962 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5963 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5964
5965 /* set to active low mode */
5966 val = REG_RD(bp, MISC_REG_SPIO_INT);
5967 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5968 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5969 REG_WR(bp, MISC_REG_SPIO_INT, val);
5970
5971 /* enable interrupt to signal the IGU */
5972 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5973 val |= (1 << MISC_REGISTERS_SPIO_5);
5974 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5975}
5976
5977static int bnx2x_init_common(struct bnx2x *bp)
5978{
5979 u32 val, i;
5980 
5981 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5982 
5983 bnx2x_reset_common(bp);
5984 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5985 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5986 
5987 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5988 if (CHIP_IS_E1H(bp))
5989 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5990 
5991 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5992 msleep(30);
5993 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5994 
5995 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5996 if (CHIP_IS_E1(bp)) {
5997 /* enable HW interrupt from PXP on USDM overflow
5998 bit 16 on INT_MASK_0 */
5999 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6000 }
6001 
6002 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6003 bnx2x_init_pxp(bp);
6004
6005#ifdef __BIG_ENDIAN
6006 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6007 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6008 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6009 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6010 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6011 /* make sure this value is 0 */
6012 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6013
6014/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6015 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6016 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6017 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6018 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6019#endif
6020
6021 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6022#ifdef BCM_ISCSI
6023 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6024 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6025 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6026#endif
6027
6028 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6029 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6030 
6031 /* let the HW do its magic ... */
6032 msleep(100);
6033 /* finish PXP init */
6034 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6035 if (val != 1) {
6036 BNX2X_ERR("PXP2 CFG failed\n");
6037 return -EBUSY;
6038 }
6039 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6040 if (val != 1) {
6041 BNX2X_ERR("PXP2 RD_INIT failed\n");
6042 return -EBUSY;
6043 }
6044 
6045 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6046 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6047 
6048 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6049 
6050 /* clean the DMAE memory */
6051 bp->dmae_ready = 1;
6052 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6053 
6054 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6055 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6056 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6057 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6058 
6059 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6060 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6061 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6062 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6063
6064 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6065 /* soft reset pulse */
6066 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6067 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6068
6069#ifdef BCM_ISCSI
6070 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6071#endif
6072 
6073 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6074 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6075 if (!CHIP_REV_IS_SLOW(bp)) {
6076 /* enable hw interrupt from doorbell Q */
6077 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6078 }
6079 
6080 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6081 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6082 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6083 /* set NIC mode */
6084 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6085 if (CHIP_IS_E1H(bp))
6086 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6087 
6088 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6089 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6090 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6091 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6092 
6093 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6094 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6095 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6096 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6097 
6098 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6099 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6100 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6101 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6102 
6103 /* sync semi rtc */
6104 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6105 0x80000000);
6106 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6107 0x80000000);
6108 
6109 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6110 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6111 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6112 
6113 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6114 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6115 REG_WR(bp, i, 0xc0cac01a);
6116 /* TODO: replace with something meaningful */
6117 }
6118 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6119 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6120 
6121 if (sizeof(union cdu_context) != 1024)
6122 /* we currently assume that a context is 1024 bytes */
6123 printk(KERN_ALERT PFX "please adjust the size of"
6124 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6125 
6126 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6127 val = (4 << 24) + (0 << 12) + 1024;
6128 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6129 
6130 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6131 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6132 /* enable context validation interrupt from CFC */
6133 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6134
6135 /* set the thresholds to prevent CFC/CDU race */
6136 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6137 
6138 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6139 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6140 
6141 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6142 /* Reset PCIE errors for debug */
6143 REG_WR(bp, 0x2814, 0xffffffff);
6144 REG_WR(bp, 0x3820, 0xffffffff);
6145 
6146 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6148 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6149 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6150 
6151 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6152 if (CHIP_IS_E1H(bp)) {
6153 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6154 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6155 }
6156
6157 if (CHIP_REV_IS_SLOW(bp))
6158 msleep(200);
6159
6160 /* finish CFC init */
6161 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6162 if (val != 1) {
6163 BNX2X_ERR("CFC LL_INIT failed\n");
6164 return -EBUSY;
6165 }
6166 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6167 if (val != 1) {
6168 BNX2X_ERR("CFC AC_INIT failed\n");
6169 return -EBUSY;
6170 }
6171 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6172 if (val != 1) {
6173 BNX2X_ERR("CFC CAM_INIT failed\n");
6174 return -EBUSY;
6175 }
6176 REG_WR(bp, CFC_REG_DEBUG0, 0);
6177 
6178 /* read NIG statistic
6179 to see if this is our first up since powerup */
6180 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6181 val = *bnx2x_sp(bp, wb_data[0]);
6182
6183 /* do internal memory self test */
6184 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6185 BNX2X_ERR("internal mem self test failed\n");
6186 return -EBUSY;
6187 }
6188
6189 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6191 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6192 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6193 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6194 bp->port.need_hw_lock = 1;
6195 break;
6196
6197 default:
6198 break;
6199 }
6200 
6201 bnx2x_setup_fan_failure_detection(bp);
6202
6203 /* clear PXP2 attentions */
6204 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6205 
6206 enable_blocks_attention(bp);
6207 
6208 if (!BP_NOMCP(bp)) {
6209 bnx2x_acquire_phy_lock(bp);
6210 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6211 bnx2x_release_phy_lock(bp);
6212 } else
6213 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6214
6215 return 0;
6216}
6217 
6218static int bnx2x_init_port(struct bnx2x *bp)
6219{
6220 int port = BP_PORT(bp);
6221 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6222 u32 low, high;
6223 u32 val;
6224 
6225 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6226
6227 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6228 
6229 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6230 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6231
6232 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6233 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6234 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6235#ifdef BCM_ISCSI
6236 /* Port0 1
6237 * Port1 385 */
6238 i++;
6239 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6240 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6241 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6242 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6243
6244 /* Port0 2
6245 * Port1 386 */
6246 i++;
6247 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6248 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6249 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6250 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6251
6252 /* Port0 3
6253 * Port1 387 */
6254 i++;
6255 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6256 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6257 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6258 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6259#endif
6260 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6261 
6262#ifdef BCM_ISCSI
6263 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6264 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6265
6266 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6267#endif
6268 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6269 
6270 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6271 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6272 /* no pause for emulation and FPGA */
6273 low = 0;
6274 high = 513;
6275 } else {
6276 if (IS_E1HMF(bp))
6277 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6278 else if (bp->dev->mtu > 4096) {
6279 if (bp->flags & ONE_PORT_FLAG)
6280 low = 160;
6281 else {
6282 val = bp->dev->mtu;
6283 /* (24*1024 + val*4)/256 */
6284 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6285 }
6286 } else
6287 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6288 high = low + 56; /* 14*1024/256 */
6289 }
6290 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6291 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
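 /* Editor's note (worked example, not in the original file): in
  * single-function mode with two ports and mtu 9000 the formula above
  * gives low = 96 + 9000/64 + 1 = 237 (i.e. (24*1024 + 4*9000)/256
  * rounded up) and high = 237 + 56 = 293, both in 256-byte BRB blocks
  * (the unit is an assumption based on the /256 in the comment).
  */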
6292
6293
6294 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6295 
6296 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6297 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6298 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6299 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6300 
6301 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6302 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6303 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6304 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6305 
6306 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6307 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6308 
6309 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6310
6311 /* configure PBF to work without PAUSE mtu 9000 */
6312 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6313
6314 /* update threshold */
6315 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6316 /* update init credit */
6317 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6318
6319 /* probe changes */
6320 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6321 msleep(5);
6322 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6323
6324#ifdef BCM_ISCSI
6325 /* tell the searcher where the T2 table is */
6326 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6327
6328 wb_write[0] = U64_LO(bp->t2_mapping);
6329 wb_write[1] = U64_HI(bp->t2_mapping);
6330 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6331 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6332 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6333 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6334
6335 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6336#endif
6337 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6338 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6339
6340 if (CHIP_IS_E1(bp)) {
6341 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6342 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6343 }
6344 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6345 
6346 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6347 /* init aeu_mask_attn_func_0/1:
6348 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6349 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6350 * bits 4-7 are used for "per vn group attention" */
6351 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6352 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6353
6354 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6355 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6356 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6357 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6358 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6359 
6360 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6361
6362 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6363
6364 if (CHIP_IS_E1H(bp)) {
6365 /* 0x2 disable e1hov, 0x1 enable */
6366 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6367 (IS_E1HMF(bp) ? 0x1 : 0x2));
6368
6369 {
6370 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6371 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6372 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6373 }
6374 }
6375
6376 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6377 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6378 
6379 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6380 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6381 {
6382 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6383
6384 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6385 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6386
6387 /* The GPIO should be swapped if the swap register is
6388 set and active */
6389 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6390 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6391
6392 /* Select function upon port-swap configuration */
6393 if (port == 0) {
6394 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6395 aeu_gpio_mask = (swap_val && swap_override) ?
6396 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6397 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6398 } else {
6399 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6400 aeu_gpio_mask = (swap_val && swap_override) ?
6401 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6402 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6403 }
6404 val = REG_RD(bp, offset);
6405 /* add GPIO3 to group */
6406 val |= aeu_gpio_mask;
6407 REG_WR(bp, offset, val);
6408 }
6409 break;
6410
6411 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6412 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6413 /* add SPIO 5 to group 0 */
6414 {
6415 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6416 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6417 val = REG_RD(bp, reg_addr);
6418 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6419 REG_WR(bp, reg_addr, val);
6420 }
6421 break;
6422
6423 default:
6424 break;
6425 }
6426
6427 bnx2x__link_reset(bp);
6428 
6429 return 0;
6430}
6431
6432#define ILT_PER_FUNC (768/2)
6433#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6434/* the phys address is shifted right 12 bits and a valid bit
6435 (1) is added at the 53rd bit;
6436 then, since this is a wide register(TM),
6437 we split it into two 32 bit writes
6438 */
6439#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6440#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6441#define PXP_ONE_ILT(x) (((x) << 10) | x)
6442#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6443
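/* Editor's note (worked example, not in the original file): for a DMA
 * address below 2^44, e.g. x = 0x1234567000,
 *	ONCHIP_ADDR1(x) = x >> 12       = 0x01234567
 *	ONCHIP_ADDR2(x) = (1 << 20) | 0 = 0x00100000
 * so the second (high) word carries only the valid bit; PXP_ONE_ILT(x)
 * packs the same line number into both the first and last fields of an
 * ILT range.
 */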
6444#define CNIC_ILT_LINES 0
6445
6446static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6447{
6448 int reg;
6449
6450 if (CHIP_IS_E1H(bp))
6451 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6452 else /* E1 */
6453 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6454
6455 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6456}
6457
6458static int bnx2x_init_func(struct bnx2x *bp)
6459{
6460 int port = BP_PORT(bp);
6461 int func = BP_FUNC(bp);
6462 u32 addr, val;
6463 int i;
6464
6465 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6466
6467 /* set MSI reconfigure capability */
6468 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6469 val = REG_RD(bp, addr);
6470 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6471 REG_WR(bp, addr, val);
6472
6473 i = FUNC_ILT_BASE(func);
6474
6475 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6476 if (CHIP_IS_E1H(bp)) {
6477 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6478 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6479 } else /* E1 */
6480 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6481 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6482
6483
6484 if (CHIP_IS_E1H(bp)) {
6485 for (i = 0; i < 9; i++)
6486 bnx2x_init_block(bp,
6487 cm_blocks[i], FUNC0_STAGE + func);
6488
6489 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6490 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6491 }
6492
6493 /* HC init per function */
6494 if (CHIP_IS_E1H(bp)) {
6495 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6496
6497 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6498 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6499 }
6500 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6501 
6502 /* Reset PCIE errors for debug */
6503 REG_WR(bp, 0x2114, 0xffffffff);
6504 REG_WR(bp, 0x2120, 0xffffffff);
6505 
6506 return 0;
6507}
6508
6509static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6510{
6511 int i, rc = 0;
6512 
6513 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6514 BP_FUNC(bp), load_code);
6515 
6516 bp->dmae_ready = 0;
6517 mutex_init(&bp->dmae_mutex);
6518 rc = bnx2x_gunzip_init(bp);
6519 if (rc)
6520 return rc;
6521 
6522 switch (load_code) {
6523 case FW_MSG_CODE_DRV_LOAD_COMMON:
6524 rc = bnx2x_init_common(bp);
6525 if (rc)
6526 goto init_hw_err;
6527 /* no break */
6528
6529 case FW_MSG_CODE_DRV_LOAD_PORT:
6530 bp->dmae_ready = 1;
6531 rc = bnx2x_init_port(bp);
6532 if (rc)
6533 goto init_hw_err;
6534 /* no break */
6535
6536 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6537 bp->dmae_ready = 1;
6538 rc = bnx2x_init_func(bp);
6539 if (rc)
6540 goto init_hw_err;
6541 break;
6542
6543 default:
6544 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6545 break;
6546 }
6547
6548 if (!BP_NOMCP(bp)) {
6549 int func = BP_FUNC(bp);
6550
6551 bp->fw_drv_pulse_wr_seq =
6552 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6553 DRV_PULSE_SEQ_MASK);
6554 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6555 }
6556 
6557 /* this needs to be done before gunzip end */
6558 bnx2x_zero_def_sb(bp);
6559 for_each_queue(bp, i)
6560 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6561
6562init_hw_err:
6563 bnx2x_gunzip_end(bp);
6564
6565 return rc;
6566}
6567
6568static void bnx2x_free_mem(struct bnx2x *bp)
6569{
6570
6571#define BNX2X_PCI_FREE(x, y, size) \
6572 do { \
6573 if (x) { \
6574 pci_free_consistent(bp->pdev, size, x, y); \
6575 x = NULL; \
6576 y = 0; \
6577 } \
6578 } while (0)
6579
6580#define BNX2X_FREE(x) \
6581 do { \
6582 if (x) { \
6583 vfree(x); \
6584 x = NULL; \
6585 } \
6586 } while (0)
6587
6588 int i;
6589
6590 /* fastpath */
6591 /* Common */
6592 for_each_queue(bp, i) {
6593
6594 /* status blocks */
6595 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6596 bnx2x_fp(bp, i, status_blk_mapping),
6597 sizeof(struct host_status_block));
6598 }
6599 /* Rx */
6600 for_each_rx_queue(bp, i) {
6601 
6602 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6603 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6604 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6605 bnx2x_fp(bp, i, rx_desc_mapping),
6606 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6607
6608 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6609 bnx2x_fp(bp, i, rx_comp_mapping),
6610 sizeof(struct eth_fast_path_rx_cqe) *
6611 NUM_RCQ_BD);
6612 
6613 /* SGE ring */
6614 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6615 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6616 bnx2x_fp(bp, i, rx_sge_mapping),
6617 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6618 }
6619 /* Tx */
6620 for_each_tx_queue(bp, i) {
6621
6622 /* fastpath tx rings: tx_buf tx_desc */
6623 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6624 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6625 bnx2x_fp(bp, i, tx_desc_mapping),
6626 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6627 }
6628 /* end of fastpath */
6629
6630 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6631 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6632
6633 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6634 sizeof(struct bnx2x_slowpath));
6635
6636#ifdef BCM_ISCSI
6637 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6638 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6639 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6640 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6641#endif
7a9b2557 6642 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6643
#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
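
	/*
	 * Two allocators are used on purpose: BNX2X_PCI_ALLOC returns
	 * DMA-coherent memory for rings and status blocks that the chip
	 * reads or writes directly, while BNX2X_ALLOC uses vmalloc() for
	 * host-only shadow arrays (sw_rx_bd/sw_tx_bd bookkeeping) that
	 * the hardware never touches.
	 */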

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
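
/*
 * MSI-X vector layout used above and in bnx2x_req_msix_irqs() below:
 * entry 0 carries slowpath (default status block) events, and entries
 * 1..BNX2X_NUM_QUEUES(bp) map one-to-one to the fastpath queues,
 * which is why both routines walk the table with offset = 1.
 */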

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
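
/*
 * The flags choice in bnx2x_req_irq() above: an MSI vector belongs
 * exclusively to this device, so the handler is requested without
 * IRQF_SHARED; a legacy INTx line may be shared with other devices
 * and therefore must be requested as shared.
 */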

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

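/*
 * bnx2x_wait_ramrod - wait for a slowpath (ramrod) completion.
 *
 * The completion handler (bnx2x_sp_event()) updates *state_p; this
 * routine polls that location until it reaches @state.  When @poll is
 * set, interrupts are not being serviced yet, so the RX completion
 * queue is drained here by calling bnx2x_rx_int() directly.
 */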
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

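/*
 * The leading (index 0) connection is opened with a PORT_SETUP ramrod
 * and tracked via bp->state, while each additional queue below is
 * opened with a CLIENT_SETUP ramrod and tracked via its own fp->state.
 */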
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  " defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}
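
/*
 * Example: with multi_mode=1 (ETH_RSS_MODE_REGULAR) on an 8-CPU host
 * and no num_rx_queues/num_tx_queues module parameters, both counts
 * default to min(8, BNX2X_MAX_QUEUES(bp)).  Passing num_tx_queues=16
 * on the same host would first be capped by BNX2X_MAX_QUEUES() and
 * then clamped down to the Rx queue count by the check above.
 */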

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
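
	/*
	 * The PMF (port management function) is the single function on
	 * each port that owns link handling and port statistics; the
	 * instance that performed the COMMON or PORT load stage takes
	 * that role here.
	 */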

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queues should only be re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

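	/*
	 * Error unwinding mirrors the setup order: load_error3 undoes
	 * the MCP load request and the HW init, load_error2 releases
	 * the IRQs, and load_error1 tears down NAPI and the ring memory.
	 */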
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

7498
da5a662a 7499static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7500{
4781bfad 7501 __le16 dsb_sp_prod_idx;
c14423fe 7502 /* if the other port is handling traffic,
a2fbb9ea 7503 this can take a lot of time */
34f80b04
EG
7504 int cnt = 500;
7505 int rc;
a2fbb9ea
ET
7506
7507 might_sleep();
7508
7509 /* Send HALT ramrod */
7510 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7511 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7512
34f80b04
EG
7513 /* Wait for completion */
7514 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7515 &(bp->fp[0].state), 1);
7516 if (rc) /* timeout */
da5a662a 7517 return rc;
a2fbb9ea 7518
49d66772 7519 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7520
228241eb 7521 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7522 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7523
49d66772 7524 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7525 we are going to reset the chip anyway
7526 so there is not much to do if this times out
7527 */
34f80b04 7528 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7529 if (!cnt) {
7530 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7531 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7532 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7533#ifdef BNX2X_STOP_ON_ERROR
7534 bnx2x_panic();
7535#endif
36e552ab 7536 rc = -EBUSY;
34f80b04
EG
7537 break;
7538 }
7539 cnt--;
da5a662a 7540 msleep(1);
5650d9d4 7541 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7542 }
7543 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7544 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7545
7546 return rc;
a2fbb9ea
ET
7547}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

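/*
 * The unload codes mirror the load hierarchy: UNLOAD_COMMON (last
 * driver instance on the chip) resets port, function and common
 * blocks; UNLOAD_PORT (last instance on this port) resets port and
 * function; UNLOAD_FUNCTION resets only the per-function state.
 */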
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

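/*
 * The GRC "pretend" register makes subsequent register accesses from
 * this function appear to come from another function.  Below it is
 * used to temporarily pretend to be function 0, so interrupts can be
 * disabled the E1 ("like-E1") way, after which the original function
 * setting is restored.
 */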
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

8418 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8419 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8420 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8421 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8422 ADVERTISED_TP);
a2fbb9ea
ET
8423 } else {
8424 BNX2X_ERR("NVRAM config error. "
8425 "Invalid link_config 0x%x"
8426 " speed_cap_mask 0x%x\n",
34f80b04 8427 bp->port.link_config,
c18487ee 8428 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8429 return;
8430 }
8431 break;
8432
8433 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8434 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8435 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8436 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8437 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8438 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8439 ADVERTISED_FIBRE);
a2fbb9ea
ET
8440 } else {
8441 BNX2X_ERR("NVRAM config error. "
8442 "Invalid link_config 0x%x"
8443 " speed_cap_mask 0x%x\n",
34f80b04 8444 bp->port.link_config,
c18487ee 8445 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8446 return;
8447 }
8448 break;
8449
8450 default:
8451 BNX2X_ERR("NVRAM config error. "
8452 "BAD link speed link_config 0x%x\n",
34f80b04 8453 bp->port.link_config);
c18487ee 8454 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8455 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8456 break;
8457 }
a2fbb9ea 8458
34f80b04
EG
8459 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8460 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8461 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8462 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8463 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8464
c18487ee 8465 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8466 " advertising 0x%x\n",
c18487ee
YR
8467 bp->link_params.req_line_speed,
8468 bp->link_params.req_duplex,
34f80b04 8469 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8470}
8471
34f80b04 8472static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8473{
34f80b04
EG
8474 int port = BP_PORT(bp);
8475 u32 val, val2;
589abe3a 8476 u32 config;
c2c8b03e 8477 u16 i;
01cd4528 8478 u32 ext_phy_type;
a2fbb9ea 8479
c18487ee 8480 bp->link_params.bp = bp;
34f80b04 8481 bp->link_params.port = port;
c18487ee 8482
c18487ee 8483 bp->link_params.lane_config =
a2fbb9ea 8484 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8485 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8486 SHMEM_RD(bp,
8487 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8488 /* BCM8727_NOC => BCM8727 no over current */
8489 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8490 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8491 bp->link_params.ext_phy_config &=
8492 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8493 bp->link_params.ext_phy_config |=
8494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8495 bp->link_params.feature_config_flags |=
8496 FEATURE_CONFIG_BCM8727_NOC;
8497 }
8498
c18487ee 8499 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8500 SHMEM_RD(bp,
8501 dev_info.port_hw_config[port].speed_capability_mask);
8502
34f80b04 8503 bp->port.link_config =
a2fbb9ea
ET
8504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8505
c2c8b03e
EG
8506 /* Get the 4 lanes xgxs config rx and tx */
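	/* each shmem dword packs two 16-bit lane values: the high half
	 * goes to the even lane, the low half to the odd lane */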
8507 for (i = 0; i < 2; i++) {
8508 val = SHMEM_RD(bp,
8509 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8510 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8511 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8512
8513 val = SHMEM_RD(bp,
8514 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8515 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8516 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8517 }
8518
3ce2c3f9
EG
8519 /* If the device is capable of WoL, set the default state according
8520 * to the HW
8521 */
4d295db0 8522 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8523 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8524 (config & PORT_FEATURE_WOL_ENABLED));
8525
c2c8b03e
EG
8526 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8527 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8528 bp->link_params.lane_config,
8529 bp->link_params.ext_phy_config,
34f80b04 8530 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8531
4d295db0
EG
8532 bp->link_params.switch_cfg |= (bp->port.link_config &
8533 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8534 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8535
8536 bnx2x_link_settings_requested(bp);
8537
01cd4528
EG
8538 /*
8539 * If connected directly, work with the internal PHY; otherwise, work
8540 * with the external PHY
8541 */
8542 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8544 bp->mdio.prtad = bp->link_params.phy_addr;
8545
8546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8548 bp->mdio.prtad =
8549 (bp->link_params.ext_phy_config &
8550 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8551 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8552
a2fbb9ea
ET
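	/* the port MAC address is stored in shmem as two 32-bit words:
	 * mac_upper carries bytes 0-1 in its low 16 bits, mac_lower
	 * carries bytes 2-5 */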
8553 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8554 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8555 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8556 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8557 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8558 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8559 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8560 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8561 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8562 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8563}
8564
8565static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8566{
8567 int func = BP_FUNC(bp);
8568 u32 val, val2;
8569 int rc = 0;
a2fbb9ea 8570
34f80b04 8571 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8572
34f80b04
EG
8573 bp->e1hov = 0;
8574 bp->e1hmf = 0;
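	/* On E1H, a valid outer-VLAN (E1HOV) tag configured for function 0
	 * marks the device as multi-function; each function must then
	 * carry a valid tag of its own. */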
8575 if (CHIP_IS_E1H(bp)) {
8576 bp->mf_config =
8577 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8578
2691d51d 8579 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8580 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8581 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8582 bp->e1hmf = 1;
2691d51d
EG
8583 BNX2X_DEV_INFO("%s function mode\n",
8584 IS_E1HMF(bp) ? "multi" : "single");
8585
8586 if (IS_E1HMF(bp)) {
8587 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8588 e1hov_tag) &
8589 FUNC_MF_CFG_E1HOV_TAG_MASK);
8590 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8591 bp->e1hov = val;
8592 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8593 "(0x%04x)\n",
8594 func, bp->e1hov, bp->e1hov);
8595 } else {
34f80b04
EG
8596 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8597 " aborting\n", func);
8598 rc = -EPERM;
8599 }
2691d51d
EG
8600 } else {
8601 if (BP_E1HVN(bp)) {
8602 BNX2X_ERR("!!! VN %d in single function mode,"
8603 " aborting\n", BP_E1HVN(bp));
8604 rc = -EPERM;
8605 }
34f80b04
EG
8606 }
8607 }
a2fbb9ea 8608
34f80b04
EG
8609 if (!BP_NOMCP(bp)) {
8610 bnx2x_get_port_hwinfo(bp);
8611
8612 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8613 DRV_MSG_SEQ_NUMBER_MASK);
8614 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8615 }
8616
8617 if (IS_E1HMF(bp)) {
8618 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8619 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8620 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8621 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8622 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8623 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8624 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8625 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8626 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8627 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8628 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8629 ETH_ALEN);
8630 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8631 ETH_ALEN);
a2fbb9ea 8632 }
34f80b04
EG
8633
8634 return rc;
a2fbb9ea
ET
8635 }
8636
34f80b04
EG
8637 if (BP_NOMCP(bp)) {
8638 /* only supposed to happen on emulation/FPGA */
33471629 8639 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8640 random_ether_addr(bp->dev->dev_addr);
8641 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8642 }
a2fbb9ea 8643
34f80b04
EG
8644 return rc;
8645}
8646
8647static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8648{
8649 int func = BP_FUNC(bp);
87942b46 8650 int timer_interval;
34f80b04
EG
8651 int rc;
8652
da5a662a
VZ
8653 /* Disable interrupt handling until HW is initialized */
8654 atomic_set(&bp->intr_sem, 1);
e1510706 8655 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8656
34f80b04 8657 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8658
1cf167f2 8659 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8660 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8661
8662 rc = bnx2x_get_hwinfo(bp);
8663
8664 /* need to reset chip if undi was active */
8665 if (!BP_NOMCP(bp))
8666 bnx2x_undi_unload(bp);
8667
8668 if (CHIP_REV_IS_FPGA(bp))
8669 printk(KERN_ERR PFX "FPGA detected\n");
8670
8671 if (BP_NOMCP(bp) && (func == 0))
8672 printk(KERN_ERR PFX
8673 "MCP disabled, must load devices in order!\n");
8674
555f6c78 8675 /* Set multi queue mode */
8badd27a
EG
8676 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8677 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8678 printk(KERN_ERR PFX
8badd27a 8679 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8680 multi_mode = ETH_RSS_MODE_DISABLED;
8681 }
8682 bp->multi_mode = multi_mode;
8683
8684
7a9b2557
VZ
8685 /* Set TPA flags */
8686 if (disable_tpa) {
8687 bp->flags &= ~TPA_ENABLE_FLAG;
8688 bp->dev->features &= ~NETIF_F_LRO;
8689 } else {
8690 bp->flags |= TPA_ENABLE_FLAG;
8691 bp->dev->features |= NETIF_F_LRO;
8692 }
8693
a18f5128
EG
8694 if (CHIP_IS_E1(bp))
8695 bp->dropless_fc = 0;
8696 else
8697 bp->dropless_fc = dropless_fc;
8698
8d5726c4 8699 bp->mrrs = mrrs;
7a9b2557 8700
34f80b04
EG
8701 bp->tx_ring_size = MAX_TX_AVAIL;
8702 bp->rx_ring_size = MAX_RX_AVAIL;
8703
8704 bp->rx_csum = 1;
34f80b04
EG
8705
8706 bp->tx_ticks = 50;
8707 bp->rx_ticks = 25;
8708
87942b46
EG
8709 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8710 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8711
8712 init_timer(&bp->timer);
8713 bp->timer.expires = jiffies + bp->current_interval;
8714 bp->timer.data = (unsigned long) bp;
8715 bp->timer.function = bnx2x_timer;
8716
8717 return rc;
a2fbb9ea
ET
8718}
8719
8720/*
8721 * ethtool service functions
8722 */
8723
8724/* All ethtool functions called with rtnl_lock */
8725
8726static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8727{
8728 struct bnx2x *bp = netdev_priv(dev);
8729
34f80b04
EG
8730 cmd->supported = bp->port.supported;
8731 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8732
8733 if (netif_carrier_ok(dev)) {
c18487ee
YR
8734 cmd->speed = bp->link_vars.line_speed;
8735 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8736 } else {
c18487ee
YR
8737 cmd->speed = bp->link_params.req_line_speed;
8738 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8739 }
34f80b04
EG
8740 if (IS_E1HMF(bp)) {
8741 u16 vn_max_rate;
8742
8743 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8744 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8745 if (vn_max_rate < cmd->speed)
8746 cmd->speed = vn_max_rate;
8747 }
a2fbb9ea 8748
c18487ee
YR
8749 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8750 u32 ext_phy_type =
8751 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8752
8753 switch (ext_phy_type) {
8754 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8755 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8756 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8757 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8758 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8759 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8760 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
8761 cmd->port = PORT_FIBRE;
8762 break;
8763
8764 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8765 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8766 cmd->port = PORT_TP;
8767 break;
8768
c18487ee
YR
8769 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8770 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8771 bp->link_params.ext_phy_config);
8772 break;
8773
f1410647
ET
8774 default:
8775 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8776 bp->link_params.ext_phy_config);
8777 break;
f1410647
ET
8778 }
8779 } else
a2fbb9ea 8780 cmd->port = PORT_TP;
a2fbb9ea 8781
01cd4528 8782 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
8783 cmd->transceiver = XCVR_INTERNAL;
8784
c18487ee 8785 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8786 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8787 else
a2fbb9ea 8788 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8789
8790 cmd->maxtxpkt = 0;
8791 cmd->maxrxpkt = 0;
8792
8793 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8794 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8795 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8796 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8797 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8798 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8799 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8800
8801 return 0;
8802}
8803
8804static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8805{
8806 struct bnx2x *bp = netdev_priv(dev);
8807 u32 advertising;
8808
34f80b04
EG
8809 if (IS_E1HMF(bp))
8810 return 0;
8811
a2fbb9ea
ET
8812 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8813 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8814 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8815 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8816 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8817 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8818 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8819
a2fbb9ea 8820 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8821 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8822 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8823 return -EINVAL;
f1410647 8824 }
a2fbb9ea
ET
8825
8826 /* advertise the requested speed and duplex if supported */
34f80b04 8827 cmd->advertising &= bp->port.supported;
a2fbb9ea 8828
c18487ee
YR
8829 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8830 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8831 bp->port.advertising |= (ADVERTISED_Autoneg |
8832 cmd->advertising);
a2fbb9ea
ET
8833
8834 } else { /* forced speed */
8835 /* advertise the requested speed and duplex if supported */
8836 switch (cmd->speed) {
8837 case SPEED_10:
8838 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8839 if (!(bp->port.supported &
f1410647
ET
8840 SUPPORTED_10baseT_Full)) {
8841 DP(NETIF_MSG_LINK,
8842 "10M full not supported\n");
a2fbb9ea 8843 return -EINVAL;
f1410647 8844 }
a2fbb9ea
ET
8845
8846 advertising = (ADVERTISED_10baseT_Full |
8847 ADVERTISED_TP);
8848 } else {
34f80b04 8849 if (!(bp->port.supported &
f1410647
ET
8850 SUPPORTED_10baseT_Half)) {
8851 DP(NETIF_MSG_LINK,
8852 "10M half not supported\n");
a2fbb9ea 8853 return -EINVAL;
f1410647 8854 }
a2fbb9ea
ET
8855
8856 advertising = (ADVERTISED_10baseT_Half |
8857 ADVERTISED_TP);
8858 }
8859 break;
8860
8861 case SPEED_100:
8862 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8863 if (!(bp->port.supported &
f1410647
ET
8864 SUPPORTED_100baseT_Full)) {
8865 DP(NETIF_MSG_LINK,
8866 "100M full not supported\n");
a2fbb9ea 8867 return -EINVAL;
f1410647 8868 }
a2fbb9ea
ET
8869
8870 advertising = (ADVERTISED_100baseT_Full |
8871 ADVERTISED_TP);
8872 } else {
34f80b04 8873 if (!(bp->port.supported &
f1410647
ET
8874 SUPPORTED_100baseT_Half)) {
8875 DP(NETIF_MSG_LINK,
8876 "100M half not supported\n");
a2fbb9ea 8877 return -EINVAL;
f1410647 8878 }
a2fbb9ea
ET
8879
8880 advertising = (ADVERTISED_100baseT_Half |
8881 ADVERTISED_TP);
8882 }
8883 break;
8884
8885 case SPEED_1000:
f1410647
ET
8886 if (cmd->duplex != DUPLEX_FULL) {
8887 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8888 return -EINVAL;
f1410647 8889 }
a2fbb9ea 8890
34f80b04 8891 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8892 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8893 return -EINVAL;
f1410647 8894 }
a2fbb9ea
ET
8895
8896 advertising = (ADVERTISED_1000baseT_Full |
8897 ADVERTISED_TP);
8898 break;
8899
8900 case SPEED_2500:
f1410647
ET
8901 if (cmd->duplex != DUPLEX_FULL) {
8902 DP(NETIF_MSG_LINK,
8903 "2.5G half not supported\n");
a2fbb9ea 8904 return -EINVAL;
f1410647 8905 }
a2fbb9ea 8906
34f80b04 8907 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8908 DP(NETIF_MSG_LINK,
8909 "2.5G full not supported\n");
a2fbb9ea 8910 return -EINVAL;
f1410647 8911 }
a2fbb9ea 8912
f1410647 8913 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8914 ADVERTISED_TP);
8915 break;
8916
8917 case SPEED_10000:
f1410647
ET
8918 if (cmd->duplex != DUPLEX_FULL) {
8919 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8920 return -EINVAL;
f1410647 8921 }
a2fbb9ea 8922
34f80b04 8923 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8924 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8925 return -EINVAL;
f1410647 8926 }
a2fbb9ea
ET
8927
8928 advertising = (ADVERTISED_10000baseT_Full |
8929 ADVERTISED_FIBRE);
8930 break;
8931
8932 default:
f1410647 8933 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8934 return -EINVAL;
8935 }
8936
c18487ee
YR
8937 bp->link_params.req_line_speed = cmd->speed;
8938 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8939 bp->port.advertising = advertising;
a2fbb9ea
ET
8940 }
8941
c18487ee 8942 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8943 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8944 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8945 bp->port.advertising);
a2fbb9ea 8946
34f80b04 8947 if (netif_running(dev)) {
bb2a0f7a 8948 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8949 bnx2x_link_set(bp);
8950 }
a2fbb9ea
ET
8951
8952 return 0;
8953}
8954
0a64ea57
EG
8955#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8956#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8957
8958static int bnx2x_get_regs_len(struct net_device *dev)
8959{
0a64ea57 8960 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 8961 int regdump_len = 0;
0a64ea57
EG
8962 int i;
8963
0a64ea57
EG
8964 if (CHIP_IS_E1(bp)) {
8965 for (i = 0; i < REGS_COUNT; i++)
8966 if (IS_E1_ONLINE(reg_addrs[i].info))
8967 regdump_len += reg_addrs[i].size;
8968
8969 for (i = 0; i < WREGS_COUNT_E1; i++)
8970 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8971 regdump_len += wreg_addrs_e1[i].size *
8972 (1 + wreg_addrs_e1[i].read_regs_count);
8973
8974 } else { /* E1H */
8975 for (i = 0; i < REGS_COUNT; i++)
8976 if (IS_E1H_ONLINE(reg_addrs[i].info))
8977 regdump_len += reg_addrs[i].size;
8978
8979 for (i = 0; i < WREGS_COUNT_E1H; i++)
8980 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8981 regdump_len += wreg_addrs_e1h[i].size *
8982 (1 + wreg_addrs_e1h[i].read_regs_count);
8983 }
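	/* the counts above are in 32-bit registers; convert to bytes and
	 * account for the dump header */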
8984 regdump_len *= 4;
8985 regdump_len += sizeof(struct dump_hdr);
8986
8987 return regdump_len;
8988}
8989
8990static void bnx2x_get_regs(struct net_device *dev,
8991 struct ethtool_regs *regs, void *_p)
8992{
8993 u32 *p = _p, i, j;
8994 struct bnx2x *bp = netdev_priv(dev);
8995 struct dump_hdr dump_hdr = {0};
8996
8997 regs->version = 0;
8998 memset(p, 0, regs->len);
8999
9000 if (!netif_running(bp->dev))
9001 return;
9002
9003 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9004 dump_hdr.dump_sign = dump_sign_all;
9005 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9006 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9007 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9008 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9009 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9010
9011 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9012 p += dump_hdr.hdr_size + 1;
9013
9014 if (CHIP_IS_E1(bp)) {
9015 for (i = 0; i < REGS_COUNT; i++)
9016 if (IS_E1_ONLINE(reg_addrs[i].info))
9017 for (j = 0; j < reg_addrs[i].size; j++)
9018 *p++ = REG_RD(bp,
9019 reg_addrs[i].addr + j*4);
9020
9021 } else { /* E1H */
9022 for (i = 0; i < REGS_COUNT; i++)
9023 if (IS_E1H_ONLINE(reg_addrs[i].info))
9024 for (j = 0; j < reg_addrs[i].size; j++)
9025 *p++ = REG_RD(bp,
9026 reg_addrs[i].addr + j*4);
9027 }
9028}
9029
0d28e49a
EG
9030#define PHY_FW_VER_LEN 10
9031
9032static void bnx2x_get_drvinfo(struct net_device *dev,
9033 struct ethtool_drvinfo *info)
9034{
9035 struct bnx2x *bp = netdev_priv(dev);
9036 u8 phy_fw_ver[PHY_FW_VER_LEN];
9037
9038 strcpy(info->driver, DRV_MODULE_NAME);
9039 strcpy(info->version, DRV_MODULE_VERSION);
9040
9041 phy_fw_ver[0] = '\0';
9042 if (bp->port.pmf) {
9043 bnx2x_acquire_phy_lock(bp);
9044 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9045 (bp->state != BNX2X_STATE_CLOSED),
9046 phy_fw_ver, PHY_FW_VER_LEN);
9047 bnx2x_release_phy_lock(bp);
9048 }
9049
9050 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9051 (bp->common.bc_ver & 0xff0000) >> 16,
9052 (bp->common.bc_ver & 0xff00) >> 8,
9053 (bp->common.bc_ver & 0xff),
9054 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9055 strcpy(info->bus_info, pci_name(bp->pdev));
9056 info->n_stats = BNX2X_NUM_STATS;
9057 info->testinfo_len = BNX2X_NUM_TESTS;
9058 info->eedump_len = bp->common.flash_size;
9059 info->regdump_len = bnx2x_get_regs_len(dev);
9060}
9061
a2fbb9ea
ET
9062static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9063{
9064 struct bnx2x *bp = netdev_priv(dev);
9065
9066 if (bp->flags & NO_WOL_FLAG) {
9067 wol->supported = 0;
9068 wol->wolopts = 0;
9069 } else {
9070 wol->supported = WAKE_MAGIC;
9071 if (bp->wol)
9072 wol->wolopts = WAKE_MAGIC;
9073 else
9074 wol->wolopts = 0;
9075 }
9076 memset(&wol->sopass, 0, sizeof(wol->sopass));
9077}
9078
9079static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9080{
9081 struct bnx2x *bp = netdev_priv(dev);
9082
9083 if (wol->wolopts & ~WAKE_MAGIC)
9084 return -EINVAL;
9085
9086 if (wol->wolopts & WAKE_MAGIC) {
9087 if (bp->flags & NO_WOL_FLAG)
9088 return -EINVAL;
9089
9090 bp->wol = 1;
34f80b04 9091 } else
a2fbb9ea 9092 bp->wol = 0;
34f80b04 9093
a2fbb9ea
ET
9094 return 0;
9095}
9096
9097static u32 bnx2x_get_msglevel(struct net_device *dev)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
9100
9101 return bp->msglevel;
9102}
9103
9104static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9105{
9106 struct bnx2x *bp = netdev_priv(dev);
9107
9108 if (capable(CAP_NET_ADMIN))
9109 bp->msglevel = level;
9110}
9111
9112static int bnx2x_nway_reset(struct net_device *dev)
9113{
9114 struct bnx2x *bp = netdev_priv(dev);
9115
34f80b04
EG
9116 if (!bp->port.pmf)
9117 return 0;
a2fbb9ea 9118
34f80b04 9119 if (netif_running(dev)) {
bb2a0f7a 9120 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9121 bnx2x_link_set(bp);
9122 }
a2fbb9ea
ET
9123
9124 return 0;
9125}
9126
01e53298
NO
9127static u32
9128bnx2x_get_link(struct net_device *dev)
9129{
9130 struct bnx2x *bp = netdev_priv(dev);
9131
9132 return bp->link_vars.link_up;
9133}
9134
a2fbb9ea
ET
9135static int bnx2x_get_eeprom_len(struct net_device *dev)
9136{
9137 struct bnx2x *bp = netdev_priv(dev);
9138
34f80b04 9139 return bp->common.flash_size;
a2fbb9ea
ET
9140}
9141
9142static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9143{
34f80b04 9144 int port = BP_PORT(bp);
a2fbb9ea
ET
9145 int count, i;
9146 u32 val = 0;
9147
9148 /* adjust timeout for emulation/FPGA */
9149 count = NVRAM_TIMEOUT_COUNT;
9150 if (CHIP_REV_IS_SLOW(bp))
9151 count *= 100;
9152
9153 /* request access to nvram interface */
9154 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9155 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9156
9157 for (i = 0; i < count*10; i++) {
9158 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9159 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9160 break;
9161
9162 udelay(5);
9163 }
9164
9165 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9166 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9167 return -EBUSY;
9168 }
9169
9170 return 0;
9171}
9172
9173static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9174{
34f80b04 9175 int port = BP_PORT(bp);
a2fbb9ea
ET
9176 int count, i;
9177 u32 val = 0;
9178
9179 /* adjust timeout for emulation/FPGA */
9180 count = NVRAM_TIMEOUT_COUNT;
9181 if (CHIP_REV_IS_SLOW(bp))
9182 count *= 100;
9183
9184 /* relinquish nvram interface */
9185 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9186 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9187
9188 for (i = 0; i < count*10; i++) {
9189 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9190 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9191 break;
9192
9193 udelay(5);
9194 }
9195
9196 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9197 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9198 return -EBUSY;
9199 }
9200
9201 return 0;
9202}
9203
9204static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9205{
9206 u32 val;
9207
9208 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9209
9210 /* enable both bits, even on read */
9211 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9212 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9213 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9214}
9215
9216static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9217{
9218 u32 val;
9219
9220 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9221
9222 /* disable both bits, even after read */
9223 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9224 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9225 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9226}
9227
4781bfad 9228static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9229 u32 cmd_flags)
9230{
f1410647 9231 int count, i, rc;
a2fbb9ea
ET
9232 u32 val;
9233
9234 /* build the command word */
9235 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9236
9237 /* need to clear DONE bit separately */
9238 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9239
9240 /* address of the NVRAM to read from */
9241 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9242 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9243
9244 /* issue a read command */
9245 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9246
9247 /* adjust timeout for emulation/FPGA */
9248 count = NVRAM_TIMEOUT_COUNT;
9249 if (CHIP_REV_IS_SLOW(bp))
9250 count *= 100;
9251
9252 /* wait for completion */
9253 *ret_val = 0;
9254 rc = -EBUSY;
9255 for (i = 0; i < count; i++) {
9256 udelay(5);
9257 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9258
9259 if (val & MCPR_NVM_COMMAND_DONE) {
9260 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9261 /* we read nvram data in cpu order,
9262 * but ethtool sees it as an array of bytes;
9263 * converting to big-endian will do the work */
4781bfad 9264 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9265 rc = 0;
9266 break;
9267 }
9268 }
9269
9270 return rc;
9271}
9272
9273static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9274 int buf_size)
9275{
9276 int rc;
9277 u32 cmd_flags;
4781bfad 9278 __be32 val;
a2fbb9ea
ET
9279
9280 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9281 DP(BNX2X_MSG_NVM,
c14423fe 9282 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9283 offset, buf_size);
9284 return -EINVAL;
9285 }
9286
34f80b04
EG
9287 if (offset + buf_size > bp->common.flash_size) {
9288 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9289 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9290 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9291 return -EINVAL;
9292 }
9293
9294 /* request access to nvram interface */
9295 rc = bnx2x_acquire_nvram_lock(bp);
9296 if (rc)
9297 return rc;
9298
9299 /* enable access to nvram interface */
9300 bnx2x_enable_nvram_access(bp);
9301
9302 /* read the first word(s) */
9303 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9304 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9305 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9306 memcpy(ret_buf, &val, 4);
9307
9308 /* advance to the next dword */
9309 offset += sizeof(u32);
9310 ret_buf += sizeof(u32);
9311 buf_size -= sizeof(u32);
9312 cmd_flags = 0;
9313 }
9314
9315 if (rc == 0) {
9316 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9317 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9318 memcpy(ret_buf, &val, 4);
9319 }
9320
9321 /* disable access to nvram interface */
9322 bnx2x_disable_nvram_access(bp);
9323 bnx2x_release_nvram_lock(bp);
9324
9325 return rc;
9326}
9327
9328static int bnx2x_get_eeprom(struct net_device *dev,
9329 struct ethtool_eeprom *eeprom, u8 *eebuf)
9330{
9331 struct bnx2x *bp = netdev_priv(dev);
9332 int rc;
9333
2add3acb
EG
9334 if (!netif_running(dev))
9335 return -EAGAIN;
9336
34f80b04 9337 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9338 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9339 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9340 eeprom->len, eeprom->len);
9341
9342 /* parameters already validated in ethtool_get_eeprom */
9343
9344 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9345
9346 return rc;
9347}
9348
9349static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9350 u32 cmd_flags)
9351{
f1410647 9352 int count, i, rc;
a2fbb9ea
ET
9353
9354 /* build the command word */
9355 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9356
9357 /* need to clear DONE bit separately */
9358 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9359
9360 /* write the data */
9361 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9362
9363 /* address of the NVRAM to write to */
9364 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9365 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9366
9367 /* issue the write command */
9368 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9369
9370 /* adjust timeout for emulation/FPGA */
9371 count = NVRAM_TIMEOUT_COUNT;
9372 if (CHIP_REV_IS_SLOW(bp))
9373 count *= 100;
9374
9375 /* wait for completion */
9376 rc = -EBUSY;
9377 for (i = 0; i < count; i++) {
9378 udelay(5);
9379 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9380 if (val & MCPR_NVM_COMMAND_DONE) {
9381 rc = 0;
9382 break;
9383 }
9384 }
9385
9386 return rc;
9387}
9388
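/* bit offset of a byte within its naturally aligned 32-bit word */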
f1410647 9389#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9390
9391static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9392 int buf_size)
9393{
9394 int rc;
9395 u32 cmd_flags;
9396 u32 align_offset;
4781bfad 9397 __be32 val;
a2fbb9ea 9398
34f80b04
EG
9399 if (offset + buf_size > bp->common.flash_size) {
9400 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9401 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9402 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9403 return -EINVAL;
9404 }
9405
9406 /* request access to nvram interface */
9407 rc = bnx2x_acquire_nvram_lock(bp);
9408 if (rc)
9409 return rc;
9410
9411 /* enable access to nvram interface */
9412 bnx2x_enable_nvram_access(bp);
9413
9414 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9415 align_offset = (offset & ~0x03);
9416 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9417
9418 if (rc == 0) {
9419 val &= ~(0xff << BYTE_OFFSET(offset));
9420 val |= (*data_buf << BYTE_OFFSET(offset));
9421
9422 /* nvram data is returned as an array of bytes;
9423 * convert it back to cpu order */
9424 val = be32_to_cpu(val);
9425
a2fbb9ea
ET
9426 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9427 cmd_flags);
9428 }
9429
9430 /* disable access to nvram interface */
9431 bnx2x_disable_nvram_access(bp);
9432 bnx2x_release_nvram_lock(bp);
9433
9434 return rc;
9435}
9436
9437static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9438 int buf_size)
9439{
9440 int rc;
9441 u32 cmd_flags;
9442 u32 val;
9443 u32 written_so_far;
9444
34f80b04 9445 if (buf_size == 1) /* ethtool */
a2fbb9ea 9446 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9447
9448 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9449 DP(BNX2X_MSG_NVM,
c14423fe 9450 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9451 offset, buf_size);
9452 return -EINVAL;
9453 }
9454
34f80b04
EG
9455 if (offset + buf_size > bp->common.flash_size) {
9456 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9457 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9458 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9459 return -EINVAL;
9460 }
9461
9462 /* request access to nvram interface */
9463 rc = bnx2x_acquire_nvram_lock(bp);
9464 if (rc)
9465 return rc;
9466
9467 /* enable access to nvram interface */
9468 bnx2x_enable_nvram_access(bp);
9469
9470 written_so_far = 0;
9471 cmd_flags = MCPR_NVM_COMMAND_FIRST;
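	/* each NVRAM page must be bracketed with FIRST/LAST command flags:
	 * LAST is set on the final dword of the buffer and at each page
	 * end, FIRST again at each page start */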
9472 while ((written_so_far < buf_size) && (rc == 0)) {
9473 if (written_so_far == (buf_size - sizeof(u32)))
9474 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9475 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9476 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9477 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9478 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9479
9480 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9481
9482 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9483
9484 /* advance to the next dword */
9485 offset += sizeof(u32);
9486 data_buf += sizeof(u32);
9487 written_so_far += sizeof(u32);
9488 cmd_flags = 0;
9489 }
9490
9491 /* disable access to nvram interface */
9492 bnx2x_disable_nvram_access(bp);
9493 bnx2x_release_nvram_lock(bp);
9494
9495 return rc;
9496}
9497
9498static int bnx2x_set_eeprom(struct net_device *dev,
9499 struct ethtool_eeprom *eeprom, u8 *eebuf)
9500{
9501 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9502 int port = BP_PORT(bp);
9503 int rc = 0;
a2fbb9ea 9504
9f4c9583
EG
9505 if (!netif_running(dev))
9506 return -EAGAIN;
9507
34f80b04 9508 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9509 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9510 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9511 eeprom->len, eeprom->len);
9512
9513 /* parameters already validated in ethtool_set_eeprom */
9514
f57a6025
EG
9515 /* PHY eeprom can be accessed only by the PMF */
9516 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9517 !bp->port.pmf)
9518 return -EINVAL;
9519
9520 if (eeprom->magic == 0x50485950) {
9521 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9522 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9523
f57a6025
EG
9524 bnx2x_acquire_phy_lock(bp);
9525 rc |= bnx2x_link_reset(&bp->link_params,
9526 &bp->link_vars, 0);
9527 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9528 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9529 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9530 MISC_REGISTERS_GPIO_HIGH, port);
9531 bnx2x_release_phy_lock(bp);
9532 bnx2x_link_report(bp);
9533
9534 } else if (eeprom->magic == 0x50485952) {
9535 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9536 if ((bp->state == BNX2X_STATE_OPEN) ||
9537 (bp->state == BNX2X_STATE_DISABLED)) {
4a37fb66 9538 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9539 rc |= bnx2x_link_reset(&bp->link_params,
9540 &bp->link_vars, 1);
9541
9542 rc |= bnx2x_phy_init(&bp->link_params,
9543 &bp->link_vars);
4a37fb66 9544 bnx2x_release_phy_lock(bp);
f57a6025
EG
9545 bnx2x_calc_fc_adv(bp);
9546 }
9547 } else if (eeprom->magic == 0x53985943) {
9548 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9549 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9550 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9551 u8 ext_phy_addr =
9552 (bp->link_params.ext_phy_config &
9553 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9554 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9555
9556 /* DSP Remove Download Mode */
9557 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9558 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9559
f57a6025
EG
9560 bnx2x_acquire_phy_lock(bp);
9561
9562 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9563
9564 /* wait 0.5 sec to allow it to run */
9565 msleep(500);
9566 bnx2x_ext_phy_hw_reset(bp, port);
9567 msleep(500);
9568 bnx2x_release_phy_lock(bp);
9569 }
9570 } else
c18487ee 9571 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9572
9573 return rc;
9574}
9575
9576static int bnx2x_get_coalesce(struct net_device *dev,
9577 struct ethtool_coalesce *coal)
9578{
9579 struct bnx2x *bp = netdev_priv(dev);
9580
9581 memset(coal, 0, sizeof(struct ethtool_coalesce));
9582
9583 coal->rx_coalesce_usecs = bp->rx_ticks;
9584 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9585
9586 return 0;
9587}
9588
ca00392c 9589#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9590static int bnx2x_set_coalesce(struct net_device *dev,
9591 struct ethtool_coalesce *coal)
9592{
9593 struct bnx2x *bp = netdev_priv(dev);
9594
9595 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9596 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9597 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9598
9599 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9600 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9601 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9602
34f80b04 9603 if (netif_running(dev))
a2fbb9ea
ET
9604 bnx2x_update_coalesce(bp);
9605
9606 return 0;
9607}
9608
9609static void bnx2x_get_ringparam(struct net_device *dev,
9610 struct ethtool_ringparam *ering)
9611{
9612 struct bnx2x *bp = netdev_priv(dev);
9613
9614 ering->rx_max_pending = MAX_RX_AVAIL;
9615 ering->rx_mini_max_pending = 0;
9616 ering->rx_jumbo_max_pending = 0;
9617
9618 ering->rx_pending = bp->rx_ring_size;
9619 ering->rx_mini_pending = 0;
9620 ering->rx_jumbo_pending = 0;
9621
9622 ering->tx_max_pending = MAX_TX_AVAIL;
9623 ering->tx_pending = bp->tx_ring_size;
9624}
9625
9626static int bnx2x_set_ringparam(struct net_device *dev,
9627 struct ethtool_ringparam *ering)
9628{
9629 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9630 int rc = 0;
a2fbb9ea
ET
9631
9632 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9633 (ering->tx_pending > MAX_TX_AVAIL) ||
9634 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9635 return -EINVAL;
9636
9637 bp->rx_ring_size = ering->rx_pending;
9638 bp->tx_ring_size = ering->tx_pending;
9639
34f80b04
EG
9640 if (netif_running(dev)) {
9641 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9642 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9643 }
9644
34f80b04 9645 return rc;
a2fbb9ea
ET
9646}
9647
9648static void bnx2x_get_pauseparam(struct net_device *dev,
9649 struct ethtool_pauseparam *epause)
9650{
9651 struct bnx2x *bp = netdev_priv(dev);
9652
356e2385
EG
9653 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9654 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9655 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9656
c0700f90
DM
9657 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9658 BNX2X_FLOW_CTRL_RX);
9659 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9660 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9661
9662 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9663 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9664 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9665}
9666
9667static int bnx2x_set_pauseparam(struct net_device *dev,
9668 struct ethtool_pauseparam *epause)
9669{
9670 struct bnx2x *bp = netdev_priv(dev);
9671
34f80b04
EG
9672 if (IS_E1HMF(bp))
9673 return 0;
9674
a2fbb9ea
ET
9675 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9676 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9677 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9678
c0700f90 9679 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9680
f1410647 9681 if (epause->rx_pause)
c0700f90 9682 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9683
f1410647 9684 if (epause->tx_pause)
c0700f90 9685 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9686
c0700f90
DM
9687 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9688 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9689
c18487ee 9690 if (epause->autoneg) {
34f80b04 9691 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9692 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9693 return -EINVAL;
9694 }
a2fbb9ea 9695
c18487ee 9696 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9697 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9698 }
a2fbb9ea 9699
c18487ee
YR
9700 DP(NETIF_MSG_LINK,
9701 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9702
9703 if (netif_running(dev)) {
bb2a0f7a 9704 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9705 bnx2x_link_set(bp);
9706 }
a2fbb9ea
ET
9707
9708 return 0;
9709}
9710
df0f2343
VZ
9711static int bnx2x_set_flags(struct net_device *dev, u32 data)
9712{
9713 struct bnx2x *bp = netdev_priv(dev);
9714 int changed = 0;
9715 int rc = 0;
9716
9717 /* TPA requires Rx CSUM offloading */
9718 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9719 if (!(dev->features & NETIF_F_LRO)) {
9720 dev->features |= NETIF_F_LRO;
9721 bp->flags |= TPA_ENABLE_FLAG;
9722 changed = 1;
9723 }
9724
9725 } else if (dev->features & NETIF_F_LRO) {
9726 dev->features &= ~NETIF_F_LRO;
9727 bp->flags &= ~TPA_ENABLE_FLAG;
9728 changed = 1;
9729 }
9730
9731 if (changed && netif_running(dev)) {
9732 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9733 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9734 }
9735
9736 return rc;
9737}
9738
a2fbb9ea
ET
9739static u32 bnx2x_get_rx_csum(struct net_device *dev)
9740{
9741 struct bnx2x *bp = netdev_priv(dev);
9742
9743 return bp->rx_csum;
9744}
9745
9746static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9747{
9748 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9749 int rc = 0;
a2fbb9ea
ET
9750
9751 bp->rx_csum = data;
df0f2343
VZ
9752
9753 /* Disable TPA when Rx CSUM is disabled; otherwise all
9754 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9755 if (!data) {
9756 u32 flags = ethtool_op_get_flags(dev);
9757
9758 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9759 }
9760
9761 return rc;
a2fbb9ea
ET
9762}
9763
9764static int bnx2x_set_tso(struct net_device *dev, u32 data)
9765{
755735eb 9766 if (data) {
a2fbb9ea 9767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9768 dev->features |= NETIF_F_TSO6;
9769 } else {
a2fbb9ea 9770 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9771 dev->features &= ~NETIF_F_TSO6;
9772 }
9773
a2fbb9ea
ET
9774 return 0;
9775}
9776
f3c87cdd 9777static const struct {
a2fbb9ea
ET
9778 char string[ETH_GSTRING_LEN];
9779} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9780 { "register_test (offline)" },
9781 { "memory_test (offline)" },
9782 { "loopback_test (offline)" },
9783 { "nvram_test (online)" },
9784 { "interrupt_test (online)" },
9785 { "link_test (online)" },
d3d4f495 9786 { "idle check (online)" }
a2fbb9ea
ET
9787};
9788
9789static int bnx2x_self_test_count(struct net_device *dev)
9790{
9791 return BNX2X_NUM_TESTS;
9792}
9793
f3c87cdd
YG
9794static int bnx2x_test_registers(struct bnx2x *bp)
9795{
9796 int idx, i, rc = -ENODEV;
9797 u32 wr_val = 0;
9dabc424 9798 int port = BP_PORT(bp);
f3c87cdd
YG
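	/* each entry holds a base offset (port 0), a per-port stride that
	 * is added for port 1, and a mask of the bits that must read back
	 * as written */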
9799 static const struct {
9800 u32 offset0;
9801 u32 offset1;
9802 u32 mask;
9803 } reg_tbl[] = {
9804/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9805 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9806 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9807 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9808 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9809 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9810 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9811 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9812 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9813 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9814/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9815 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9816 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9817 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9818 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9819 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9820 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9821 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 9822 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
9823 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9824/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
9825 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9826 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9827 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9828 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9829 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9830 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9831 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9832 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
9833 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9834/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
9835 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9836 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9837 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9838 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9839 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9840 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9841
9842 { 0xffffffff, 0, 0x00000000 }
9843 };
9844
9845 if (!netif_running(bp->dev))
9846 return rc;
9847
9848 /* Run the test twice:
9849 first by writing 0x00000000, then by writing 0xffffffff */
9850 for (idx = 0; idx < 2; idx++) {
9851
9852 switch (idx) {
9853 case 0:
9854 wr_val = 0;
9855 break;
9856 case 1:
9857 wr_val = 0xffffffff;
9858 break;
9859 }
9860
9861 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9862 u32 offset, mask, save_val, val;
f3c87cdd
YG
9863
9864 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9865 mask = reg_tbl[i].mask;
9866
9867 save_val = REG_RD(bp, offset);
9868
9869 REG_WR(bp, offset, wr_val);
9870 val = REG_RD(bp, offset);
9871
9872 /* Restore the original register's value */
9873 REG_WR(bp, offset, save_val);
9874
9875 /* verify that the value is as expected */
9876 if ((val & mask) != (wr_val & mask))
9877 goto test_reg_exit;
9878 }
9879 }
9880
9881 rc = 0;
9882
9883test_reg_exit:
9884 return rc;
9885}
9886
9887static int bnx2x_test_memory(struct bnx2x *bp)
9888{
9889 int i, j, rc = -ENODEV;
9890 u32 val;
9891 static const struct {
9892 u32 offset;
9893 int size;
9894 } mem_tbl[] = {
9895 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9896 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9897 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9898 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9899 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9900 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9901 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9902
9903 { 0xffffffff, 0 }
9904 };
9905 static const struct {
9906 char *name;
9907 u32 offset;
9dabc424
YG
9908 u32 e1_mask;
9909 u32 e1h_mask;
f3c87cdd 9910 } prty_tbl[] = {
9dabc424
YG
9911 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9912 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9913 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9914 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9915 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9916 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9917
9918 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9919 };
9920
9921 if (!netif_running(bp->dev))
9922 return rc;
9923
9924 /* Go through all the memories */
9925 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9926 for (j = 0; j < mem_tbl[i].size; j++)
9927 REG_RD(bp, mem_tbl[i].offset + j*4);
9928
9929 /* Check the parity status */
9930 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9931 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9932 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9933 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9934 DP(NETIF_MSG_HW,
9935 "%s is 0x%x\n", prty_tbl[i].name, val);
9936 goto test_mem_exit;
9937 }
9938 }
9939
9940 rc = 0;
9941
9942test_mem_exit:
9943 return rc;
9944}
9945
f3c87cdd
YG
9946static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9947{
9948 int cnt = 1000;
9949
9950 if (link_up)
9951 while (bnx2x_link_test(bp) && cnt--)
9952 msleep(10);
9953}
9954
9955static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9956{
9957 unsigned int pkt_size, num_pkts, i;
9958 struct sk_buff *skb;
9959 unsigned char *packet;
ca00392c
EG
9960 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9961 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
9962 u16 tx_start_idx, tx_idx;
9963 u16 rx_start_idx, rx_idx;
ca00392c 9964 u16 pkt_prod, bd_prod;
f3c87cdd 9965 struct sw_tx_bd *tx_buf;
ca00392c
EG
9966 struct eth_tx_start_bd *tx_start_bd;
9967 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
9968 dma_addr_t mapping;
9969 union eth_rx_cqe *cqe;
9970 u8 cqe_fp_flags;
9971 struct sw_rx_bd *rx_buf;
9972 u16 len;
9973 int rc = -ENODEV;
9974
b5bf9068
EG
9975 /* check the loopback mode */
9976 switch (loopback_mode) {
9977 case BNX2X_PHY_LOOPBACK:
9978 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9979 return -EINVAL;
9980 break;
9981 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9982 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9983 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9984 break;
9985 default:
f3c87cdd 9986 return -EINVAL;
b5bf9068 9987 }
f3c87cdd 9988
b5bf9068
EG
9989 /* prepare the loopback packet */
9990 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9991 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9992 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9993 if (!skb) {
9994 rc = -ENOMEM;
9995 goto test_loopback_exit;
9996 }
9997 packet = skb_put(skb, pkt_size);
9998 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
9999 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10000 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10001 for (i = ETH_HLEN; i < pkt_size; i++)
10002 packet[i] = (unsigned char) (i & 0xff);
10003
b5bf9068 10004 /* send the loopback packet */
f3c87cdd 10005 num_pkts = 0;
ca00392c
EG
10006 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10007 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10008
ca00392c
EG
10009 pkt_prod = fp_tx->tx_pkt_prod++;
10010 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10011 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10012 tx_buf->skb = skb;
ca00392c 10013 tx_buf->flags = 0;
f3c87cdd 10014
ca00392c
EG
10015 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10016 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10017 mapping = pci_map_single(bp->pdev, skb->data,
10018 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10019 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10020 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10021 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10022 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10023 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10024 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10025 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10026 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10027
10028 /* turn on parsing and get a BD */
10029 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10030 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10031
10032 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10033
58f4c4cf
EG
10034 wmb();
10035
ca00392c
EG
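	/* two BDs were queued (start BD + parse BD); advance the producer
	 * accordingly and ring the Tx doorbell */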
10036 fp_tx->tx_db.data.prod += 2;
10037 barrier();
10038 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10039
10040 mmiowb();
10041
10042 num_pkts++;
ca00392c 10043 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10044 bp->dev->trans_start = jiffies;
10045
10046 udelay(100);
10047
ca00392c 10048 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10049 if (tx_idx != tx_start_idx + num_pkts)
10050 goto test_loopback_exit;
10051
ca00392c 10052 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10053 if (rx_idx != rx_start_idx + num_pkts)
10054 goto test_loopback_exit;
10055
ca00392c 10056 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10057 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
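	/* note: ETH_RX_ERROR_FALGS is the flag name as spelled in the
	 * driver headers */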
10058 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10059 goto test_loopback_rx_exit;
10060
10061 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10062 if (len != pkt_size)
10063 goto test_loopback_rx_exit;
10064
ca00392c 10065 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10066 skb = rx_buf->skb;
10067 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10068 for (i = ETH_HLEN; i < pkt_size; i++)
10069 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10070 goto test_loopback_rx_exit;
10071
10072 rc = 0;
10073
10074test_loopback_rx_exit:
f3c87cdd 10075
ca00392c
EG
10076 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10077 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10078 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10079 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10080
10081 /* Update producers */
ca00392c
EG
10082 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10083 fp_rx->rx_sge_prod);
f3c87cdd
YG
10084
10085test_loopback_exit:
10086 bp->link_params.loopback_mode = LOOPBACK_NONE;
10087
10088 return rc;
10089}
10090
10091static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10092{
b5bf9068 10093 int rc = 0, res;
f3c87cdd
YG
10094
10095 if (!netif_running(bp->dev))
10096 return BNX2X_LOOPBACK_FAILED;
10097
f8ef6e44 10098 bnx2x_netif_stop(bp, 1);
3910c8ae 10099 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10100
b5bf9068
EG
10101 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10102 if (res) {
10103 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10104 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10105 }
10106
b5bf9068
EG
10107 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10108 if (res) {
10109 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10110 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10111 }
10112
3910c8ae 10113 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10114 bnx2x_netif_start(bp);
10115
10116 return rc;
10117}
10118
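/* CRC32 computed over a block that ends with its own little-endian CRC
 * always leaves this fixed residue, so each NVRAM section can be
 * verified without locating its CRC field.
 */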
10119#define CRC32_RESIDUAL 0xdebb20e3
10120
10121static int bnx2x_test_nvram(struct bnx2x *bp)
10122{
10123 static const struct {
10124 int offset;
10125 int size;
10126 } nvram_tbl[] = {
10127 { 0, 0x14 }, /* bootstrap */
10128 { 0x14, 0xec }, /* dir */
10129 { 0x100, 0x350 }, /* manuf_info */
10130 { 0x450, 0xf0 }, /* feature_info */
10131 { 0x640, 0x64 }, /* upgrade_key_info */
10132 { 0x6a4, 0x64 },
10133 { 0x708, 0x70 }, /* manuf_key_info */
10134 { 0x778, 0x70 },
10135 { 0, 0 }
10136 };
4781bfad 10137 __be32 buf[0x350 / 4];
f3c87cdd
YG
10138 u8 *data = (u8 *)buf;
10139 int i, rc;
10140 u32 magic, csum;
10141
10142 rc = bnx2x_nvram_read(bp, 0, data, 4);
10143 if (rc) {
f5372251 10144 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10145 goto test_nvram_exit;
10146 }
10147
10148 magic = be32_to_cpu(buf[0]);
10149 if (magic != 0x669955aa) {
10150 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10151 rc = -ENODEV;
10152 goto test_nvram_exit;
10153 }
10154
10155 for (i = 0; nvram_tbl[i].size; i++) {
10156
10157 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10158 nvram_tbl[i].size);
10159 if (rc) {
10160 DP(NETIF_MSG_PROBE,
f5372251 10161 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10162 goto test_nvram_exit;
10163 }
10164
10165 csum = ether_crc_le(nvram_tbl[i].size, data);
10166 if (csum != CRC32_RESIDUAL) {
10167 DP(NETIF_MSG_PROBE,
10168 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10169 rc = -ENODEV;
10170 goto test_nvram_exit;
10171 }
10172 }
10173
10174test_nvram_exit:
10175 return rc;
10176}
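/* A minimal standalone sketch of the residual check above (hypothetical
 * user-space code, assuming ether_crc_le() behaves as the bit-wise
 * little-endian CRC-32 below: poly 0xedb88320, init ~0, no final xor).
 * When a region ends with its complemented CRC stored little-endian, a
 * CRC over the whole region yields the fixed residual 0xdebb20e3, which
 * is exactly what bnx2x_test_nvram() verifies per NVRAM section.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le_sketch(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return crc;	/* like ether_crc_le(): no final inversion */
}

int main(void)
{
	uint8_t region[16] = "nvram test d";	/* 12 data bytes + 4 CRC */
	uint32_t crc = crc32_le_sketch(region, 12) ^ 0xffffffff;

	for (int i = 0; i < 4; i++)	/* append complemented CRC, LE */
		region[12 + i] = (uint8_t)(crc >> (8 * i));
	printf("residual 0x%08x (expect 0xdebb20e3)\n",
	       (unsigned)crc32_le_sketch(region, 16));
	return 0;
}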
10177
10178static int bnx2x_test_intr(struct bnx2x *bp)
10179{
10180 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10181 int i, rc;
10182
10183 if (!netif_running(bp->dev))
10184 return -ENODEV;
10185
8d9c5f34 10186 config->hdr.length = 0;
af246401
EG
10187 if (CHIP_IS_E1(bp))
10188 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10189 else
10190 config->hdr.offset = BP_FUNC(bp);
0626b899 10191 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10192 config->hdr.reserved1 = 0;
10193
10194 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10195 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10196 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10197 if (rc == 0) {
10198 bp->set_mac_pending++;
10199 for (i = 0; i < 10; i++) {
10200 if (!bp->set_mac_pending)
10201 break;
10202 msleep_interruptible(10);
10203 }
10204 if (i == 10)
10205 rc = -ENODEV;
10206 }
10207
10208 return rc;
10209}
10210
a2fbb9ea
ET
10211static void bnx2x_self_test(struct net_device *dev,
10212 struct ethtool_test *etest, u64 *buf)
10213{
10214 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10215
10216 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10217
f3c87cdd 10218 if (!netif_running(dev))
a2fbb9ea 10219 return;
a2fbb9ea 10220
33471629 10221 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10222 if (IS_E1HMF(bp))
10223 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10224
10225 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10226 int port = BP_PORT(bp);
10227 u32 val;
f3c87cdd
YG
10228 u8 link_up;
10229
279abdf5
EG
10230 /* save current value of input enable for TX port IF */
10231 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10232 /* disable input for TX port IF */
10233 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10234
f3c87cdd
YG
10235 link_up = bp->link_vars.link_up;
10236 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10237 bnx2x_nic_load(bp, LOAD_DIAG);
10238 /* wait until link state is restored */
10239 bnx2x_wait_for_link(bp, link_up);
10240
10241 if (bnx2x_test_registers(bp) != 0) {
10242 buf[0] = 1;
10243 etest->flags |= ETH_TEST_FL_FAILED;
10244 }
10245 if (bnx2x_test_memory(bp) != 0) {
10246 buf[1] = 1;
10247 etest->flags |= ETH_TEST_FL_FAILED;
10248 }
10249 buf[2] = bnx2x_test_loopback(bp, link_up);
10250 if (buf[2] != 0)
10251 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10252
f3c87cdd 10253 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10254
10255 /* restore input for TX port IF */
10256 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10257
f3c87cdd
YG
10258 bnx2x_nic_load(bp, LOAD_NORMAL);
10259 /* wait until link state is restored */
10260 bnx2x_wait_for_link(bp, link_up);
10261 }
10262 if (bnx2x_test_nvram(bp) != 0) {
10263 buf[3] = 1;
a2fbb9ea
ET
10264 etest->flags |= ETH_TEST_FL_FAILED;
10265 }
f3c87cdd
YG
10266 if (bnx2x_test_intr(bp) != 0) {
10267 buf[4] = 1;
10268 etest->flags |= ETH_TEST_FL_FAILED;
10269 }
10270 if (bp->port.pmf)
10271 if (bnx2x_link_test(bp) != 0) {
10272 buf[5] = 1;
10273 etest->flags |= ETH_TEST_FL_FAILED;
10274 }
f3c87cdd
YG
10275
10276#ifdef BNX2X_EXTRA_DEBUG
10277 bnx2x_panic_dump(bp);
10278#endif
a2fbb9ea
ET
10279}
10280
de832a55
EG
10281static const struct {
10282 long offset;
10283 int size;
10284 u8 string[ETH_GSTRING_LEN];
10285} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10286/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10287 { Q_STATS_OFFSET32(error_bytes_received_hi),
10288 8, "[%d]: rx_error_bytes" },
10289 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10290 8, "[%d]: rx_ucast_packets" },
10291 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10292 8, "[%d]: rx_mcast_packets" },
10293 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10294 8, "[%d]: rx_bcast_packets" },
10295 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10296 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10297 4, "[%d]: rx_phy_ip_err_discards"},
10298 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10299 4, "[%d]: rx_skb_alloc_discard" },
10300 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10301
10302/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10303 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10304 8, "[%d]: tx_packets" }
10305};
10306
bb2a0f7a
YG
10307static const struct {
10308 long offset;
10309 int size;
10310 u32 flags;
66e855f3
YG
10311#define STATS_FLAGS_PORT 1
10312#define STATS_FLAGS_FUNC 2
de832a55 10313#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10314 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10315} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10316/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10317 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10318 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10319 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10320 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10321 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10322 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10323 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10324 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10325 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10326 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10327 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10328 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10329 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10330 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10331 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10332 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10333 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10334/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10335 8, STATS_FLAGS_PORT, "rx_fragments" },
10336 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10337 8, STATS_FLAGS_PORT, "rx_jabbers" },
10338 { STATS_OFFSET32(no_buff_discard_hi),
10339 8, STATS_FLAGS_BOTH, "rx_discards" },
10340 { STATS_OFFSET32(mac_filter_discard),
10341 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10342 { STATS_OFFSET32(xxoverflow_discard),
10343 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10344 { STATS_OFFSET32(brb_drop_hi),
10345 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10346 { STATS_OFFSET32(brb_truncate_hi),
10347 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10348 { STATS_OFFSET32(pause_frames_received_hi),
10349 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10350 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10351 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10352 { STATS_OFFSET32(nig_timer_max),
10353 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10354/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10355 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10356 { STATS_OFFSET32(rx_skb_alloc_failed),
10357 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10358 { STATS_OFFSET32(hw_csum_err),
10359 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10360
10361 { STATS_OFFSET32(total_bytes_transmitted_hi),
10362 8, STATS_FLAGS_BOTH, "tx_bytes" },
10363 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10364 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10365 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10366 8, STATS_FLAGS_BOTH, "tx_packets" },
10367 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10368 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10369 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10370 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10371 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10372 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10373 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10374 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10375/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10376 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10377 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10378 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10379 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10380 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10381 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10382 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10383 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10384 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10385 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10386 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10387 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10388 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10389 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10390 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10391 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10392 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10393 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10394 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10395/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10396 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10397 { STATS_OFFSET32(pause_frames_sent_hi),
10398 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10399};
10400
de832a55
EG
10401#define IS_PORT_STAT(i) \
10402 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10403#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10404#define IS_E1HMF_MODE_STAT(bp) \
10405 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10406
a2fbb9ea
ET
10407static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10408{
bb2a0f7a 10409 struct bnx2x *bp = netdev_priv(dev);
de832a55 10410 int i, j, k;
bb2a0f7a 10411
a2fbb9ea
ET
10412 switch (stringset) {
10413 case ETH_SS_STATS:
de832a55
EG
10414 if (is_multi(bp)) {
10415 k = 0;
ca00392c 10416 for_each_rx_queue(bp, i) {
de832a55
EG
10417 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10418 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10419 bnx2x_q_stats_arr[j].string, i);
10420 k += BNX2X_NUM_Q_STATS;
10421 }
10422 if (IS_E1HMF_MODE_STAT(bp))
10423 break;
10424 for (j = 0; j < BNX2X_NUM_STATS; j++)
10425 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10426 bnx2x_stats_arr[j].string);
10427 } else {
10428 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10429 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10430 continue;
10431 strcpy(buf + j*ETH_GSTRING_LEN,
10432 bnx2x_stats_arr[i].string);
10433 j++;
10434 }
bb2a0f7a 10435 }
a2fbb9ea
ET
10436 break;
10437
10438 case ETH_SS_TEST:
10439 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10440 break;
10441 }
10442}
10443
10444static int bnx2x_get_stats_count(struct net_device *dev)
10445{
bb2a0f7a 10446 struct bnx2x *bp = netdev_priv(dev);
de832a55 10447 int i, num_stats;
bb2a0f7a 10448
de832a55 10449 if (is_multi(bp)) {
ca00392c 10450 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
de832a55
EG
10451 if (!IS_E1HMF_MODE_STAT(bp))
10452 num_stats += BNX2X_NUM_STATS;
10453 } else {
10454 if (IS_E1HMF_MODE_STAT(bp)) {
10455 num_stats = 0;
10456 for (i = 0; i < BNX2X_NUM_STATS; i++)
10457 if (IS_FUNC_STAT(i))
10458 num_stats++;
10459 } else
10460 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 10461 }
de832a55 10462
bb2a0f7a 10463 return num_stats;
a2fbb9ea
ET
10464}
10465
10466static void bnx2x_get_ethtool_stats(struct net_device *dev,
10467 struct ethtool_stats *stats, u64 *buf)
10468{
10469 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10470 u32 *hw_stats, *offset;
10471 int i, j, k;
bb2a0f7a 10472
de832a55
EG
10473 if (is_multi(bp)) {
10474 k = 0;
ca00392c 10475 for_each_rx_queue(bp, i) {
de832a55
EG
10476 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10477 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10478 if (bnx2x_q_stats_arr[j].size == 0) {
10479 /* skip this counter */
10480 buf[k + j] = 0;
10481 continue;
10482 }
10483 offset = (hw_stats +
10484 bnx2x_q_stats_arr[j].offset);
10485 if (bnx2x_q_stats_arr[j].size == 4) {
10486 /* 4-byte counter */
10487 buf[k + j] = (u64) *offset;
10488 continue;
10489 }
10490 /* 8-byte counter */
10491 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10492 }
10493 k += BNX2X_NUM_Q_STATS;
10494 }
10495 if (IS_E1HMF_MODE_STAT(bp))
10496 return;
10497 hw_stats = (u32 *)&bp->eth_stats;
10498 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10499 if (bnx2x_stats_arr[j].size == 0) {
10500 /* skip this counter */
10501 buf[k + j] = 0;
10502 continue;
10503 }
10504 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10505 if (bnx2x_stats_arr[j].size == 4) {
10506 /* 4-byte counter */
10507 buf[k + j] = (u64) *offset;
10508 continue;
10509 }
10510 /* 8-byte counter */
10511 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10512 }
de832a55
EG
10513 } else {
10514 hw_stats = (u32 *)&bp->eth_stats;
10515 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10516 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10517 continue;
10518 if (bnx2x_stats_arr[i].size == 0) {
10519 /* skip this counter */
10520 buf[j] = 0;
10521 j++;
10522 continue;
10523 }
10524 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10525 if (bnx2x_stats_arr[i].size == 4) {
10526 /* 4-byte counter */
10527 buf[j] = (u64) *offset;
10528 j++;
10529 continue;
10530 }
10531 /* 8-byte counter */
10532 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10533 j++;
a2fbb9ea 10534 }
a2fbb9ea
ET
10535 }
10536}
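/* The 8-byte counters above are stored as two 32-bit words, high word
 * first, and combined with the driver's HILO_U64 macro. A sketch of
 * that combination as a hypothetical local helper:
 */
#include <stdint.h>

static inline uint64_t hilo_u64_sketch(uint32_t hi, uint32_t lo)
{
	/* high word into bits 63..32, low word into bits 31..0 */
	return ((uint64_t)hi << 32) | lo;
}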
10537
10538static int bnx2x_phys_id(struct net_device *dev, u32 data)
10539{
10540 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10541 int port = BP_PORT(bp);
a2fbb9ea
ET
10542 int i;
10543
34f80b04
EG
10544 if (!netif_running(dev))
10545 return 0;
10546
10547 if (!bp->port.pmf)
10548 return 0;
10549
a2fbb9ea
ET
10550 if (data == 0)
10551 data = 2;
10552
10553 for (i = 0; i < (data * 2); i++) {
c18487ee 10554 if ((i % 2) == 0)
34f80b04 10555 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10556 bp->link_params.hw_led_mode,
10557 bp->link_params.chip_id);
10558 else
34f80b04 10559 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10560 bp->link_params.hw_led_mode,
10561 bp->link_params.chip_id);
10562
a2fbb9ea
ET
10563 msleep_interruptible(500);
10564 if (signal_pending(current))
10565 break;
10566 }
10567
c18487ee 10568 if (bp->link_vars.link_up)
34f80b04 10569 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10570 bp->link_vars.line_speed,
10571 bp->link_params.hw_led_mode,
10572 bp->link_params.chip_id);
a2fbb9ea
ET
10573
10574 return 0;
10575}
10576
10577static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10578 .get_settings = bnx2x_get_settings,
10579 .set_settings = bnx2x_set_settings,
10580 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10581 .get_regs_len = bnx2x_get_regs_len,
10582 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10583 .get_wol = bnx2x_get_wol,
10584 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10585 .get_msglevel = bnx2x_get_msglevel,
10586 .set_msglevel = bnx2x_set_msglevel,
10587 .nway_reset = bnx2x_nway_reset,
01e53298 10588 .get_link = bnx2x_get_link,
7a9b2557
VZ
10589 .get_eeprom_len = bnx2x_get_eeprom_len,
10590 .get_eeprom = bnx2x_get_eeprom,
10591 .set_eeprom = bnx2x_set_eeprom,
10592 .get_coalesce = bnx2x_get_coalesce,
10593 .set_coalesce = bnx2x_set_coalesce,
10594 .get_ringparam = bnx2x_get_ringparam,
10595 .set_ringparam = bnx2x_set_ringparam,
10596 .get_pauseparam = bnx2x_get_pauseparam,
10597 .set_pauseparam = bnx2x_set_pauseparam,
10598 .get_rx_csum = bnx2x_get_rx_csum,
10599 .set_rx_csum = bnx2x_set_rx_csum,
10600 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10601 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10602 .set_flags = bnx2x_set_flags,
10603 .get_flags = ethtool_op_get_flags,
10604 .get_sg = ethtool_op_get_sg,
10605 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10606 .get_tso = ethtool_op_get_tso,
10607 .set_tso = bnx2x_set_tso,
10608 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10609 .self_test = bnx2x_self_test,
10610 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10611 .phys_id = bnx2x_phys_id,
10612 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10613 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10614};
10615
10616/* end of ethtool_ops */
10617
10618/****************************************************************************
10619* General service functions
10620****************************************************************************/
10621
10622static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10623{
10624 u16 pmcsr;
10625
10626 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10627
10628 switch (state) {
10629 case PCI_D0:
34f80b04 10630 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10631 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10632 PCI_PM_CTRL_PME_STATUS));
10633
10634 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10635 /* delay required during transition out of D3hot */
a2fbb9ea 10636 msleep(20);
34f80b04 10637 break;
a2fbb9ea 10638
34f80b04
EG
10639 case PCI_D3hot:
10640 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10641 pmcsr |= 3;
a2fbb9ea 10642
34f80b04
EG
10643 if (bp->wol)
10644 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10645
34f80b04
EG
10646 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10647 pmcsr);
a2fbb9ea 10648
34f80b04
EG
10649 /* No more memory access after this point until
10650 * device is brought back to D0.
10651 */
10652 break;
10653
10654 default:
10655 return -EINVAL;
10656 }
10657 return 0;
a2fbb9ea
ET
10658}
10659
237907c1
EG
10660static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10661{
10662 u16 rx_cons_sb;
10663
10664 /* Tell compiler that status block fields can change */
10665 barrier();
10666 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10667 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10668 rx_cons_sb++;
10669 return (fp->rx_comp_cons != rx_cons_sb);
10670}
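/* The "== MAX_RCQ_DESC_CNT" test above steps over the last element of an
 * RCQ page, which is used as a next-page pointer rather than a CQE. A
 * sketch of the same skip with a hypothetical page of 8 elements (7
 * usable descriptors):
 */
#include <stdint.h>

#define SKETCH_RCQ_DESC_CNT	8
#define SKETCH_MAX_RCQ_DESC_CNT	(SKETCH_RCQ_DESC_CNT - 1)

static uint16_t sketch_skip_next_page_elem(uint16_t idx)
{
	if ((idx & SKETCH_MAX_RCQ_DESC_CNT) == SKETCH_MAX_RCQ_DESC_CNT)
		idx++;	/* index landed on the next-page element */
	return idx;
}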
10671
34f80b04
EG
10672/*
10673 * net_device service functions
10674 */
10675
a2fbb9ea
ET
10676static int bnx2x_poll(struct napi_struct *napi, int budget)
10677{
10678 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10679 napi);
10680 struct bnx2x *bp = fp->bp;
10681 int work_done = 0;
10682
10683#ifdef BNX2X_STOP_ON_ERROR
10684 if (unlikely(bp->panic))
34f80b04 10685 goto poll_panic;
a2fbb9ea
ET
10686#endif
10687
a2fbb9ea
ET
10688 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10689 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10690
10691 bnx2x_update_fpsb_idx(fp);
10692
8534f32c 10693 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10694 work_done = bnx2x_rx_int(fp, budget);
356e2385 10695
8534f32c
EG
10696 /* must not complete if we consumed full budget */
10697 if (work_done >= budget)
10698 goto poll_again;
10699 }
a2fbb9ea 10700
ca00392c 10701 /* bnx2x_has_rx_work() reads the status block, so we must make sure
8534f32c 10702 * that the status block indices have actually been read
ca00392c 10703 * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work);
8534f32c 10704 * otherwise we might write the "newer" status block value to the IGU
ca00392c 10705 * (if a DMA happened right after bnx2x_has_rx_work and, without the
8534f32c
EG
10706 * rmb, the memory read in bnx2x_update_fpsb_idx were postponed to
10707 * just before bnx2x_ack_sb). In that case no further interrupt would
10708 * arrive until the next status block update, even though unhandled
10709 * work remains.
10710 */
10711 rmb();
a2fbb9ea 10712
ca00392c 10713 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10714#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10715poll_panic:
a2fbb9ea 10716#endif
288379f0 10717 napi_complete(napi);
a2fbb9ea 10718
0626b899 10719 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10720 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10721 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10722 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10723 }
356e2385 10724
8534f32c 10725poll_again:
a2fbb9ea
ET
10726 return work_done;
10727}
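/* The rmb() in bnx2x_poll() implements a "check, fence, re-check"
 * pattern: the status block index is re-read after a read barrier
 * before interrupts are re-enabled. A user-space analogue using C11
 * atomics; all names here are illustrative, not driver API:
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int sketch_sb_index;	/* updated by "hardware" */
static unsigned int sketch_last_ack;		/* last index we handled */

static bool sketch_can_reenable_irq(void)
{
	/* order the load below after the work we just completed -
	 * the role rmb() plays in bnx2x_poll() */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&sketch_sb_index,
				    memory_order_relaxed) == sketch_last_ack;
}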
10728
755735eb
EG
10729
10730/* We split the first BD into a headers BD and a data BD
33471629 10731 * to ease the pain of our fellow microcode engineers;
755735eb
EG
10732 * the two BDs share one DMA mapping.
10733 * So far this has only been observed to happen
10734 * in Other Operating Systems(TM).
10735 */
10736static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10737 struct bnx2x_fastpath *fp,
ca00392c
EG
10738 struct sw_tx_bd *tx_buf,
10739 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10740 u16 bd_prod, int nbd)
10741{
ca00392c 10742 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10743 struct eth_tx_bd *d_tx_bd;
10744 dma_addr_t mapping;
10745 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10746
10747 /* first fix first BD */
10748 h_tx_bd->nbd = cpu_to_le16(nbd);
10749 h_tx_bd->nbytes = cpu_to_le16(hlen);
10750
10751 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10752 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10753 h_tx_bd->addr_lo, h_tx_bd->nbd);
10754
10755 /* now get a new data BD
10756 * (after the pbd) and fill it */
10757 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10758 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
10759
10760 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10761 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10762
10763 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10764 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10765 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
10766
10767 /* this marks the BD as one that has no individual mapping */
10768 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10769
755735eb
EG
10770 DP(NETIF_MSG_TX_QUEUED,
10771 "TSO split data size is %d (%x:%x)\n",
10772 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10773
ca00392c
EG
10774 /* update tx_bd */
10775 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
10776
10777 return bd_prod;
10778}
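/* A condensed sketch of the split performed by bnx2x_tx_split(): the
 * single DMA mapping is reused, with the headers BD covering the first
 * hlen bytes and the data BD the remainder (hypothetical stand-in
 * types, not the real BD layout):
 */
#include <stdint.h>

struct sketch_bd {
	uint64_t addr;
	uint16_t nbytes;
};

static void sketch_split_bd(struct sketch_bd *hdr, struct sketch_bd *data,
			    uint64_t mapping, uint16_t len, uint16_t hlen)
{
	hdr->addr = mapping;		/* headers BD: start of mapping */
	hdr->nbytes = hlen;
	data->addr = mapping + hlen;	/* data BD: same mapping, offset */
	data->nbytes = len - hlen;
}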
10779
10780static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10781{
10782 if (fix > 0)
10783 csum = (u16) ~csum_fold(csum_sub(csum,
10784 csum_partial(t_header - fix, fix, 0)));
10785
10786 else if (fix < 0)
10787 csum = (u16) ~csum_fold(csum_add(csum,
10788 csum_partial(t_header, -fix, 0)));
10789
10790 return swab16(csum);
10791}
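/* bnx2x_csum_fix() adds or subtracts the bytes the hardware summed past
 * (or short of) the transport header, then folds the 32-bit sum back to
 * 16 bits. A hypothetical standalone sketch of the one's-complement
 * fold that csum_fold() performs:
 */
#include <stdint.h>

static inline uint16_t sketch_csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries once... */
	sum = (sum & 0xffff) + (sum >> 16);	/* ...and any new carry */
	return (uint16_t)~sum;
}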
10792
10793static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10794{
10795 u32 rc;
10796
10797 if (skb->ip_summed != CHECKSUM_PARTIAL)
10798 rc = XMIT_PLAIN;
10799
10800 else {
4781bfad 10801 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10802 rc = XMIT_CSUM_V6;
10803 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10804 rc |= XMIT_CSUM_TCP;
10805
10806 } else {
10807 rc = XMIT_CSUM_V4;
10808 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10809 rc |= XMIT_CSUM_TCP;
10810 }
10811 }
10812
10813 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10814 rc |= XMIT_GSO_V4;
10815
10816 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10817 rc |= XMIT_GSO_V6;
10818
10819 return rc;
10820}
10821
632da4d6 10822#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10823/* Check whether the packet requires linearization (i.e. is too
10824 fragmented). There is no need to check when the page size is above
10825 8K, since the FW restriction cannot be violated then. */
755735eb
EG
10826static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10827 u32 xmit_type)
10828{
10829 int to_copy = 0;
10830 int hlen = 0;
10831 int first_bd_sz = 0;
10832
10833 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10834 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10835
10836 if (xmit_type & XMIT_GSO) {
10837 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10838 /* Check if LSO packet needs to be copied:
10839 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10840 int wnd_size = MAX_FETCH_BD - 3;
33471629 10841 /* Number of windows to check */
755735eb
EG
10842 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10843 int wnd_idx = 0;
10844 int frag_idx = 0;
10845 u32 wnd_sum = 0;
10846
10847 /* Headers length */
10848 hlen = (int)(skb_transport_header(skb) - skb->data) +
10849 tcp_hdrlen(skb);
10850
10851 /* Amount of data (w/o headers) on linear part of SKB */
10852 first_bd_sz = skb_headlen(skb) - hlen;
10853
10854 wnd_sum = first_bd_sz;
10855
10856 /* Calculate the first sum - it's special */
10857 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10858 wnd_sum +=
10859 skb_shinfo(skb)->frags[frag_idx].size;
10860
10861 /* If there was data in the linear part of the skb - check it */
10862 if (first_bd_sz > 0) {
10863 if (unlikely(wnd_sum < lso_mss)) {
10864 to_copy = 1;
10865 goto exit_lbl;
10866 }
10867
10868 wnd_sum -= first_bd_sz;
10869 }
10870
10871 /* Others are easier: run through the frag list and
10872 check all windows */
10873 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10874 wnd_sum +=
10875 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10876
10877 if (unlikely(wnd_sum < lso_mss)) {
10878 to_copy = 1;
10879 break;
10880 }
10881 wnd_sum -=
10882 skb_shinfo(skb)->frags[wnd_idx].size;
10883 }
755735eb
EG
10884 } else {
10885 /* a non-LSO packet that is too fragmented must always
10886 be linearized */
10887 to_copy = 1;
10888 }
10889 }
10890
10891exit_lbl:
10892 if (unlikely(to_copy))
10893 DP(NETIF_MSG_TX_QUEUED,
10894 "Linearization IS REQUIRED for %s packet. "
10895 "num_frags %d hlen %d first_bd_sz %d\n",
10896 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10897 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10898
10899 return to_copy;
10900}
632da4d6 10901#endif
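/* bnx2x_pkt_req_lin() slides a window of wnd_size consecutive BDs over
 * the frags and requires every window to carry at least one MSS of
 * payload. A condensed sketch of that scan over an array of fragment
 * sizes (hypothetical helper; sizes in bytes):
 */
#include <stdbool.h>

static bool sketch_needs_linearize(const unsigned int *frag_sz, int nfrags,
				   unsigned int first_bd_sz,
				   unsigned int mss, int wnd_size)
{
	unsigned int wnd_sum = first_bd_sz;
	int i;

	if (nfrags < wnd_size)		/* few enough frags: always fits */
		return false;

	/* first window: linear data plus the first wnd_size - 1 frags */
	for (i = 0; i < wnd_size - 1; i++)
		wnd_sum += frag_sz[i];
	if (first_bd_sz > 0 && wnd_sum < mss)
		return true;
	wnd_sum -= first_bd_sz;

	/* remaining windows: wnd_size consecutive frags each */
	for (i = 0; i <= nfrags - wnd_size; i++) {
		wnd_sum += frag_sz[i + wnd_size - 1];
		if (wnd_sum < mss)
			return true;
		wnd_sum -= frag_sz[i];
	}
	return false;
}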
755735eb
EG
10902
10903/* called with netif_tx_lock
a2fbb9ea 10904 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10905 * netif_wake_queue()
a2fbb9ea
ET
10906 */
10907static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10908{
10909 struct bnx2x *bp = netdev_priv(dev);
ca00392c 10910 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 10911 struct netdev_queue *txq;
a2fbb9ea 10912 struct sw_tx_bd *tx_buf;
ca00392c
EG
10913 struct eth_tx_start_bd *tx_start_bd;
10914 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
10915 struct eth_tx_parse_bd *pbd = NULL;
10916 u16 pkt_prod, bd_prod;
755735eb 10917 int nbd, fp_index;
a2fbb9ea 10918 dma_addr_t mapping;
755735eb 10919 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
10920 int i;
10921 u8 hlen = 0;
ca00392c 10922 __le16 pkt_size = 0;
a2fbb9ea
ET
10923
10924#ifdef BNX2X_STOP_ON_ERROR
10925 if (unlikely(bp->panic))
10926 return NETDEV_TX_BUSY;
10927#endif
10928
555f6c78
EG
10929 fp_index = skb_get_queue_mapping(skb);
10930 txq = netdev_get_tx_queue(dev, fp_index);
10931
ca00392c
EG
10932 fp = &bp->fp[fp_index + bp->num_rx_queues];
10933 fp_stat = &bp->fp[fp_index];
755735eb 10934
231fd58a 10935 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 10936 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 10937 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10938 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10939 return NETDEV_TX_BUSY;
10940 }
10941
755735eb
EG
10942 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10943 " gso type %x xmit_type %x\n",
10944 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10945 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10946
632da4d6 10947#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10948 /* First, check if we need to linearize the skb (due to FW
10949 restrictions). No need to check fragmentation if page size > 8K
10950 (there will be no violation to FW restrictions) */
755735eb
EG
10951 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10952 /* Statistics of linearization */
10953 bp->lin_cnt++;
10954 if (skb_linearize(skb) != 0) {
10955 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10956 "silently dropping this SKB\n");
10957 dev_kfree_skb_any(skb);
da5a662a 10958 return NETDEV_TX_OK;
755735eb
EG
10959 }
10960 }
632da4d6 10961#endif
755735eb 10962
a2fbb9ea 10963 /*
755735eb 10964 Please read carefully. First we use one BD which we mark as start,
ca00392c 10965 then we have a parsing info BD (used for TSO or xsum),
755735eb 10966 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10967 (don't forget to mark the last one as last,
10968 and to unmap only AFTER you write to the BD ...)
755735eb 10969 And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10970 */
10971
10972 pkt_prod = fp->tx_pkt_prod++;
755735eb 10973 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10974
755735eb 10975 /* get a tx_buf and first BD */
a2fbb9ea 10976 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 10977 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 10978
ca00392c
EG
10979 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10980 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10981 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 10982 /* header nbd */
ca00392c 10983 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10984
755735eb
EG
10985 /* remember the first BD of the packet */
10986 tx_buf->first_bd = fp->tx_bd_prod;
10987 tx_buf->skb = skb;
ca00392c 10988 tx_buf->flags = 0;
a2fbb9ea
ET
10989
10990 DP(NETIF_MSG_TX_QUEUED,
10991 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 10992 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 10993
0c6671b0
EG
10994#ifdef BCM_VLAN
10995 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10996 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
10997 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10998 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 10999 } else
0c6671b0 11000#endif
ca00392c 11001 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11002
ca00392c
EG
11003 /* turn on parsing and get a BD */
11004 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11005 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11006
ca00392c 11007 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11008
11009 if (xmit_type & XMIT_CSUM) {
ca00392c 11010 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11011
11012 /* for now NS flag is not used in Linux */
4781bfad
EG
11013 pbd->global_data =
11014 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11015 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11016
755735eb
EG
11017 pbd->ip_hlen = (skb_transport_header(skb) -
11018 skb_network_header(skb)) / 2;
11019
11020 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11021
755735eb 11022 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11023 hlen = hlen*2;
a2fbb9ea 11024
ca00392c 11025 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11026
11027 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11028 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11029 ETH_TX_BD_FLAGS_IP_CSUM;
11030 else
ca00392c
EG
11031 tx_start_bd->bd_flags.as_bitfield |=
11032 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11033
11034 if (xmit_type & XMIT_CSUM_TCP) {
11035 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11036
11037 } else {
11038 s8 fix = SKB_CS_OFF(skb); /* signed! */
11039
ca00392c 11040 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11041
755735eb 11042 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11043 "hlen %d fix %d csum before fix %x\n",
11044 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11045
11046 /* HW bug: fixup the CSUM */
11047 pbd->tcp_pseudo_csum =
11048 bnx2x_csum_fix(skb_transport_header(skb),
11049 SKB_CS(skb), fix);
11050
11051 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11052 pbd->tcp_pseudo_csum);
11053 }
a2fbb9ea
ET
11054 }
11055
11056 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11057 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11058
ca00392c
EG
11059 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11060 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11061 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11062 tx_start_bd->nbd = cpu_to_le16(nbd);
11063 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11064 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11065
11066 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11067 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11068 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11069 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11070 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11071
755735eb 11072 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11073
11074 DP(NETIF_MSG_TX_QUEUED,
11075 "TSO packet len %d hlen %d total len %d tso size %d\n",
11076 skb->len, hlen, skb_headlen(skb),
11077 skb_shinfo(skb)->gso_size);
11078
ca00392c 11079 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11080
755735eb 11081 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11082 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11083 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11084
11085 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11086 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11087 pbd->tcp_flags = pbd_tcp_flags(skb);
11088
11089 if (xmit_type & XMIT_GSO_V4) {
11090 pbd->ip_id = swab16(ip_hdr(skb)->id);
11091 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11092 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11093 ip_hdr(skb)->daddr,
11094 0, IPPROTO_TCP, 0));
755735eb
EG
11095
11096 } else
11097 pbd->tcp_pseudo_csum =
11098 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11099 &ipv6_hdr(skb)->daddr,
11100 0, IPPROTO_TCP, 0));
11101
a2fbb9ea
ET
11102 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11103 }
ca00392c 11104 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11105
755735eb
EG
11106 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11107 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11108
755735eb 11109 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11110 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11111 if (total_pkt_bd == NULL)
11112 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11113
755735eb
EG
11114 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11115 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11116
ca00392c
EG
11117 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11118 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11119 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11120 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11121
755735eb 11122 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11123 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11124 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11125 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11126 }
11127
ca00392c 11128 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11129
a2fbb9ea
ET
11130 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11131
755735eb 11132 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11133 * if the packet contains or ends with it
11134 */
11135 if (TX_BD_POFF(bd_prod) < nbd)
11136 nbd++;
11137
ca00392c
EG
11138 if (total_pkt_bd != NULL)
11139 total_pkt_bd->total_pkt_bytes = pkt_size;
11140
a2fbb9ea
ET
11141 if (pbd)
11142 DP(NETIF_MSG_TX_QUEUED,
11143 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11144 " tcp_flags %x xsum %x seq %u hlen %u\n",
11145 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11146 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11147 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11148
755735eb 11149 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11150
58f4c4cf
EG
11151 /*
11152 * Make sure that the BD data is updated before updating the producer
11153 * since FW might read the BD right after the producer is updated.
11154 * This is only applicable for weak-ordered memory model archs such
11155 * as IA-64. The following barrier is also mandatory since the FW
11156 * assumes that packets always have BDs.
11157 */
11158 wmb();
11159
ca00392c
EG
11160 fp->tx_db.data.prod += nbd;
11161 barrier();
11162 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11163
11164 mmiowb();
11165
755735eb 11166 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11167
11168 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11169 netif_tx_stop_queue(txq);
58f4c4cf
EG
11170 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11171 if we put Tx into XOFF state. */
11172 smp_mb();
ca00392c 11173 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11174 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11175 netif_tx_wake_queue(txq);
a2fbb9ea 11176 }
ca00392c 11177 fp_stat->tx_pkt++;
a2fbb9ea
ET
11178
11179 return NETDEV_TX_OK;
11180}
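/* The stop-queue path in bnx2x_start_xmit() re-checks ring space after
 * smp_mb() so a concurrent Tx completion cannot leave the queue stopped
 * forever (a lost wakeup). The same shape in user-space C11 atomics;
 * all names are illustrative:
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool sketch_stopped;
static _Atomic int sketch_avail;	/* bumped by the completion path */

static void sketch_maybe_stop_queue(int needed)
{
	if (atomic_load(&sketch_avail) >= needed)
		return;
	atomic_store(&sketch_stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	/* a completion may have freed space between the check and the
	 * stop above; re-check so we never stall with a stopped queue */
	if (atomic_load(&sketch_avail) >= needed)
		atomic_store(&sketch_stopped, false);
}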
11181
bb2a0f7a 11182/* called with rtnl_lock */
a2fbb9ea
ET
11183static int bnx2x_open(struct net_device *dev)
11184{
11185 struct bnx2x *bp = netdev_priv(dev);
11186
6eccabb3
EG
11187 netif_carrier_off(dev);
11188
a2fbb9ea
ET
11189 bnx2x_set_power_state(bp, PCI_D0);
11190
bb2a0f7a 11191 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11192}
11193
bb2a0f7a 11194/* called with rtnl_lock */
a2fbb9ea
ET
11195static int bnx2x_close(struct net_device *dev)
11196{
a2fbb9ea
ET
11197 struct bnx2x *bp = netdev_priv(dev);
11198
11199 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11200 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11201 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11202 if (!CHIP_REV_IS_SLOW(bp))
11203 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11204
11205 return 0;
11206}
11207
f5372251 11208/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11209static void bnx2x_set_rx_mode(struct net_device *dev)
11210{
11211 struct bnx2x *bp = netdev_priv(dev);
11212 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11213 int port = BP_PORT(bp);
11214
11215 if (bp->state != BNX2X_STATE_OPEN) {
11216 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11217 return;
11218 }
11219
11220 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11221
11222 if (dev->flags & IFF_PROMISC)
11223 rx_mode = BNX2X_RX_MODE_PROMISC;
11224
11225 else if ((dev->flags & IFF_ALLMULTI) ||
11226 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11227 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11228
11229 else { /* some multicasts */
11230 if (CHIP_IS_E1(bp)) {
11231 int i, old, offset;
11232 struct dev_mc_list *mclist;
11233 struct mac_configuration_cmd *config =
11234 bnx2x_sp(bp, mcast_config);
11235
11236 for (i = 0, mclist = dev->mc_list;
11237 mclist && (i < dev->mc_count);
11238 i++, mclist = mclist->next) {
11239
11240 config->config_table[i].
11241 cam_entry.msb_mac_addr =
11242 swab16(*(u16 *)&mclist->dmi_addr[0]);
11243 config->config_table[i].
11244 cam_entry.middle_mac_addr =
11245 swab16(*(u16 *)&mclist->dmi_addr[2]);
11246 config->config_table[i].
11247 cam_entry.lsb_mac_addr =
11248 swab16(*(u16 *)&mclist->dmi_addr[4]);
11249 config->config_table[i].cam_entry.flags =
11250 cpu_to_le16(port);
11251 config->config_table[i].
11252 target_table_entry.flags = 0;
ca00392c
EG
11253 config->config_table[i].target_table_entry.
11254 clients_bit_vector =
11255 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11256 config->config_table[i].
11257 target_table_entry.vlan_id = 0;
11258
11259 DP(NETIF_MSG_IFUP,
11260 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11261 config->config_table[i].
11262 cam_entry.msb_mac_addr,
11263 config->config_table[i].
11264 cam_entry.middle_mac_addr,
11265 config->config_table[i].
11266 cam_entry.lsb_mac_addr);
11267 }
8d9c5f34 11268 old = config->hdr.length;
34f80b04
EG
11269 if (old > i) {
11270 for (; i < old; i++) {
11271 if (CAM_IS_INVALID(config->
11272 config_table[i])) {
af246401 11273 /* already invalidated */
34f80b04
EG
11274 break;
11275 }
11276 /* invalidate */
11277 CAM_INVALIDATE(config->
11278 config_table[i]);
11279 }
11280 }
11281
11282 if (CHIP_REV_IS_SLOW(bp))
11283 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11284 else
11285 offset = BNX2X_MAX_MULTICAST*(1 + port);
11286
8d9c5f34 11287 config->hdr.length = i;
34f80b04 11288 config->hdr.offset = offset;
8d9c5f34 11289 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11290 config->hdr.reserved1 = 0;
11291
11292 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11293 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11294 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11295 0);
11296 } else { /* E1H */
11297 /* Accept one or more multicasts */
11298 struct dev_mc_list *mclist;
11299 u32 mc_filter[MC_HASH_SIZE];
11300 u32 crc, bit, regidx;
11301 int i;
11302
11303 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11304
11305 for (i = 0, mclist = dev->mc_list;
11306 mclist && (i < dev->mc_count);
11307 i++, mclist = mclist->next) {
11308
7c510e4b
JB
11309 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11310 mclist->dmi_addr);
34f80b04
EG
11311
11312 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11313 bit = (crc >> 24) & 0xff;
11314 regidx = bit >> 5;
11315 bit &= 0x1f;
11316 mc_filter[regidx] |= (1 << bit);
11317 }
11318
11319 for (i = 0; i < MC_HASH_SIZE; i++)
11320 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11321 mc_filter[i]);
11322 }
11323 }
11324
11325 bp->rx_mode = rx_mode;
11326 bnx2x_set_storm_rx_mode(bp);
11327}
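/* On E1H the multicast filter above is a 256-bit hash: the top byte of
 * a CRC32c over the MAC address selects one bit, stored across eight
 * 32-bit registers (assuming MC_HASH_SIZE == 8). A sketch of just the
 * index arithmetic, taking the CRC as input:
 */
#include <stdint.h>

static void sketch_mc_hash_set(uint32_t mc_filter[8], uint32_t crc)
{
	uint32_t bit = (crc >> 24) & 0xff;	/* 0..255 */
	uint32_t regidx = bit >> 5;		/* which 32-bit word */

	mc_filter[regidx] |= 1u << (bit & 0x1f); /* which bit within it */
}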
11328
11329/* called with rtnl_lock */
a2fbb9ea
ET
11330static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11331{
11332 struct sockaddr *addr = p;
11333 struct bnx2x *bp = netdev_priv(dev);
11334
34f80b04 11335 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11336 return -EINVAL;
11337
11338 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11339 if (netif_running(dev)) {
11340 if (CHIP_IS_E1(bp))
3101c2bc 11341 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 11342 else
3101c2bc 11343 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 11344 }
a2fbb9ea
ET
11345
11346 return 0;
11347}
11348
c18487ee 11349/* called with rtnl_lock */
01cd4528
EG
11350static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11351 int devad, u16 addr)
a2fbb9ea 11352{
01cd4528
EG
11353 struct bnx2x *bp = netdev_priv(netdev);
11354 u16 value;
11355 int rc;
11356 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11357
01cd4528
EG
11358 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11359 prtad, devad, addr);
a2fbb9ea 11360
01cd4528
EG
11361 if (prtad != bp->mdio.prtad) {
11362 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11363 prtad, bp->mdio.prtad);
11364 return -EINVAL;
11365 }
11366
11367 /* The HW expects different devad if CL22 is used */
11368 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11369
01cd4528
EG
11370 bnx2x_acquire_phy_lock(bp);
11371 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11372 devad, addr, &value);
11373 bnx2x_release_phy_lock(bp);
11374 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11375
01cd4528
EG
11376 if (!rc)
11377 rc = value;
11378 return rc;
11379}
a2fbb9ea 11380
01cd4528
EG
11381/* called with rtnl_lock */
11382static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11383 u16 addr, u16 value)
11384{
11385 struct bnx2x *bp = netdev_priv(netdev);
11386 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11387 int rc;
11388
11389 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11390 " value 0x%x\n", prtad, devad, addr, value);
11391
11392 if (prtad != bp->mdio.prtad) {
11393 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11394 prtad, bp->mdio.prtad);
11395 return -EINVAL;
a2fbb9ea
ET
11396 }
11397
01cd4528
EG
11398 /* The HW expects different devad if CL22 is used */
11399 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11400
01cd4528
EG
11401 bnx2x_acquire_phy_lock(bp);
11402 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11403 devad, addr, value);
11404 bnx2x_release_phy_lock(bp);
11405 return rc;
11406}
c18487ee 11407
01cd4528
EG
11408/* called with rtnl_lock */
11409static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11410{
11411 struct bnx2x *bp = netdev_priv(dev);
11412 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11413
01cd4528
EG
11414 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11415 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11416
01cd4528
EG
11417 if (!netif_running(dev))
11418 return -EAGAIN;
11419
11420 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11421}
11422
34f80b04 11423/* called with rtnl_lock */
a2fbb9ea
ET
11424static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11425{
11426 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11427 int rc = 0;
a2fbb9ea
ET
11428
11429 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11430 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11431 return -EINVAL;
11432
11433 /* This does not race with packet allocation
c14423fe 11434 * because the actual alloc size is
a2fbb9ea
ET
11435 * only updated as part of load
11436 */
11437 dev->mtu = new_mtu;
11438
11439 if (netif_running(dev)) {
34f80b04
EG
11440 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11441 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11442 }
34f80b04
EG
11443
11444 return rc;
a2fbb9ea
ET
11445}
11446
11447static void bnx2x_tx_timeout(struct net_device *dev)
11448{
11449 struct bnx2x *bp = netdev_priv(dev);
11450
11451#ifdef BNX2X_STOP_ON_ERROR
11452 if (!bp->panic)
11453 bnx2x_panic();
11454#endif
11455 /* This allows the netif to be shutdown gracefully before resetting */
11456 schedule_work(&bp->reset_task);
11457}
11458
11459#ifdef BCM_VLAN
34f80b04 11460/* called with rtnl_lock */
a2fbb9ea
ET
11461static void bnx2x_vlan_rx_register(struct net_device *dev,
11462 struct vlan_group *vlgrp)
11463{
11464 struct bnx2x *bp = netdev_priv(dev);
11465
11466 bp->vlgrp = vlgrp;
0c6671b0
EG
11467
11468 /* Set flags according to the required capabilities */
11469 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11470
11471 if (dev->features & NETIF_F_HW_VLAN_TX)
11472 bp->flags |= HW_VLAN_TX_FLAG;
11473
11474 if (dev->features & NETIF_F_HW_VLAN_RX)
11475 bp->flags |= HW_VLAN_RX_FLAG;
11476
a2fbb9ea 11477 if (netif_running(dev))
49d66772 11478 bnx2x_set_client_config(bp);
a2fbb9ea 11479}
34f80b04 11480
a2fbb9ea
ET
11481#endif
11482
11483#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11484static void poll_bnx2x(struct net_device *dev)
11485{
11486 struct bnx2x *bp = netdev_priv(dev);
11487
11488 disable_irq(bp->pdev->irq);
11489 bnx2x_interrupt(bp->pdev->irq, dev);
11490 enable_irq(bp->pdev->irq);
11491}
11492#endif
11493
c64213cd
SH
11494static const struct net_device_ops bnx2x_netdev_ops = {
11495 .ndo_open = bnx2x_open,
11496 .ndo_stop = bnx2x_close,
11497 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11498 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11499 .ndo_set_mac_address = bnx2x_change_mac_addr,
11500 .ndo_validate_addr = eth_validate_addr,
11501 .ndo_do_ioctl = bnx2x_ioctl,
11502 .ndo_change_mtu = bnx2x_change_mtu,
11503 .ndo_tx_timeout = bnx2x_tx_timeout,
11504#ifdef BCM_VLAN
11505 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11506#endif
11507#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11508 .ndo_poll_controller = poll_bnx2x,
11509#endif
11510};
11511
34f80b04
EG
11512static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11513 struct net_device *dev)
a2fbb9ea
ET
11514{
11515 struct bnx2x *bp;
11516 int rc;
11517
11518 SET_NETDEV_DEV(dev, &pdev->dev);
11519 bp = netdev_priv(dev);
11520
34f80b04
EG
11521 bp->dev = dev;
11522 bp->pdev = pdev;
a2fbb9ea 11523 bp->flags = 0;
34f80b04 11524 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11525
11526 rc = pci_enable_device(pdev);
11527 if (rc) {
11528 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11529 goto err_out;
11530 }
11531
11532 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11533 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11534 " aborting\n");
11535 rc = -ENODEV;
11536 goto err_out_disable;
11537 }
11538
11539 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11540 printk(KERN_ERR PFX "Cannot find second PCI device"
11541 " base address, aborting\n");
11542 rc = -ENODEV;
11543 goto err_out_disable;
11544 }
11545
34f80b04
EG
11546 if (atomic_read(&pdev->enable_cnt) == 1) {
11547 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11548 if (rc) {
11549 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11550 " aborting\n");
11551 goto err_out_disable;
11552 }
a2fbb9ea 11553
34f80b04
EG
11554 pci_set_master(pdev);
11555 pci_save_state(pdev);
11556 }
a2fbb9ea
ET
11557
11558 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11559 if (bp->pm_cap == 0) {
11560 printk(KERN_ERR PFX "Cannot find power management"
11561 " capability, aborting\n");
11562 rc = -EIO;
11563 goto err_out_release;
11564 }
11565
11566 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11567 if (bp->pcie_cap == 0) {
11568 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11569 " aborting\n");
11570 rc = -EIO;
11571 goto err_out_release;
11572 }
11573
6a35528a 11574 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11575 bp->flags |= USING_DAC_FLAG;
6a35528a 11576 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11577 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11578 " failed, aborting\n");
11579 rc = -EIO;
11580 goto err_out_release;
11581 }
11582
284901a9 11583 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11584 printk(KERN_ERR PFX "System does not support DMA,"
11585 " aborting\n");
11586 rc = -EIO;
11587 goto err_out_release;
11588 }
11589
34f80b04
EG
11590 dev->mem_start = pci_resource_start(pdev, 0);
11591 dev->base_addr = dev->mem_start;
11592 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11593
11594 dev->irq = pdev->irq;
11595
275f165f 11596 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11597 if (!bp->regview) {
11598 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11599 rc = -ENOMEM;
11600 goto err_out_release;
11601 }
11602
34f80b04
EG
11603 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11604 min_t(u64, BNX2X_DB_SIZE,
11605 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11606 if (!bp->doorbells) {
11607 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11608 rc = -ENOMEM;
11609 goto err_out_unmap;
11610 }
11611
11612 bnx2x_set_power_state(bp, PCI_D0);
11613
34f80b04
EG
11614 /* clean indirect addresses */
11615 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11616 PCICFG_VENDOR_ID_OFFSET);
11617 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11618 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11619 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11620 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11621
34f80b04 11622 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11623
c64213cd 11624 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11625 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11626 dev->features |= NETIF_F_SG;
11627 dev->features |= NETIF_F_HW_CSUM;
11628 if (bp->flags & USING_DAC_FLAG)
11629 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11630 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11631 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11632#ifdef BCM_VLAN
11633 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11634 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11635
11636 dev->vlan_features |= NETIF_F_SG;
11637 dev->vlan_features |= NETIF_F_HW_CSUM;
11638 if (bp->flags & USING_DAC_FLAG)
11639 dev->vlan_features |= NETIF_F_HIGHDMA;
11640 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11641 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11642#endif
a2fbb9ea 11643
01cd4528
EG
11644 /* get_port_hwinfo() will set prtad and mmds properly */
11645 bp->mdio.prtad = MDIO_PRTAD_NONE;
11646 bp->mdio.mmds = 0;
11647 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11648 bp->mdio.dev = dev;
11649 bp->mdio.mdio_read = bnx2x_mdio_read;
11650 bp->mdio.mdio_write = bnx2x_mdio_write;
11651
a2fbb9ea
ET
11652 return 0;
11653
11654err_out_unmap:
11655 if (bp->regview) {
11656 iounmap(bp->regview);
11657 bp->regview = NULL;
11658 }
a2fbb9ea
ET
11659 if (bp->doorbells) {
11660 iounmap(bp->doorbells);
11661 bp->doorbells = NULL;
11662 }
11663
11664err_out_release:
34f80b04
EG
11665 if (atomic_read(&pdev->enable_cnt) == 1)
11666 pci_release_regions(pdev);
a2fbb9ea
ET
11667
11668err_out_disable:
11669 pci_disable_device(pdev);
11670 pci_set_drvdata(pdev, NULL);
11671
11672err_out:
11673 return rc;
11674}
11675
37f9ce62
EG
11676static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11677 int *width, int *speed)
25047950
ET
11678{
11679 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11680
37f9ce62 11681 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11682
37f9ce62
EG
11683 /* return value of 1=2.5GHz 2=5GHz */
11684 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11685}
37f9ce62 11686
94a78b79
VZ
11687static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11688{
37f9ce62 11689 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
11690 struct bnx2x_fw_file_hdr *fw_hdr;
11691 struct bnx2x_fw_file_section *sections;
94a78b79 11692 u32 offset, len, num_ops;
37f9ce62 11693 u16 *ops_offsets;
94a78b79 11694 int i;
37f9ce62 11695 const u8 *fw_ver;
94a78b79
VZ
11696
11697 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11698 return -EINVAL;
11699
11700 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11701 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11702
11703 /* Make sure none of the offsets and sizes make us read beyond
11704 * the end of the firmware data */
11705 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11706 offset = be32_to_cpu(sections[i].offset);
11707 len = be32_to_cpu(sections[i].len);
11708 if (offset + len > firmware->size) {
37f9ce62
EG
11709 printk(KERN_ERR PFX "Section %d length is out of "
11710 "bounds\n", i);
94a78b79
VZ
11711 return -EINVAL;
11712 }
11713 }
11714
11715 /* Likewise for the init_ops offsets */
11716 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11717 ops_offsets = (u16 *)(firmware->data + offset);
11718 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11719
11720 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11721 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62
EG
11722 printk(KERN_ERR PFX "Section offset %d is out of "
11723 "bounds\n", i);
94a78b79
VZ
11724 return -EINVAL;
11725 }
11726 }
11727
11728 /* Check FW version */
11729 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11730 fw_ver = firmware->data + offset;
11731 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11732 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11733 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11734 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11735 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11736 " Should be %d.%d.%d.%d\n",
11737 fw_ver[0], fw_ver[1], fw_ver[2],
11738 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11739 BCM_5710_FW_MINOR_VERSION,
11740 BCM_5710_FW_REVISION_VERSION,
11741 BCM_5710_FW_ENGINEERING_VERSION);
11742 return -EINVAL;
11743 }
11744
11745 return 0;
11746}
11747
11748static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11749{
11750 u32 i;
11751 const __be32 *source = (const __be32*)_source;
11752 u32 *target = (u32*)_target;
11753
11754 for (i = 0; i < n/4; i++)
11755 target[i] = be32_to_cpu(source[i]);
11756}
11757
11758/*
11759 Ops array is stored in the following format:
11760 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11761 */
11762static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11763{
11764 u32 i, j, tmp;
11765 const __be32 *source = (const __be32*)_source;
11766 struct raw_op *target = (struct raw_op*)_target;
11767
11768 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11769 tmp = be32_to_cpu(source[j]);
11770 target[i].op = (tmp >> 24) & 0xff;
11771 target[i].offset = tmp & 0xffffff;
11772 target[i].raw_data = be32_to_cpu(source[j+1]);
11773 }
11774}
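/* A worked example of the record format bnx2x_prep_ops() unpacks: two
 * big-endian 32-bit words per op, the first packing an 8-bit opcode
 * above a 24-bit offset, the second carrying the raw data word
 * (hypothetical values):
 */
#include <stdio.h>

int main(void)
{
	unsigned int w0 = 0x1a00123c;	/* op 0x1a, offset 0x00123c */
	unsigned int w1 = 0xdeadbeef;	/* raw data word */

	printf("op 0x%02x offset 0x%06x data 0x%08x\n",
	       (w0 >> 24) & 0xff, w0 & 0xffffff, w1);
	return 0;
}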
11775static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11776{
11777 u32 i;
11778 u16 *target = (u16*)_target;
11779 const __be16 *source = (const __be16*)_source;
11780
11781 for (i = 0; i < n/2; i++)
11782 target[i] = be16_to_cpu(source[i]);
11783}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
	do {								\
		u32 len = be32_to_cpu(fw_hdr->arr.len);			\
		bp->arr = kmalloc(len, GFP_KERNEL);			\
		if (!bp->arr) {						\
			printk(KERN_ERR PFX				\
			       "Failed to allocate %d bytes for "#arr"\n", \
			       len);					\
			goto lbl;					\
		}							\
		func(bp->firmware->data +				\
		     be32_to_cpu(fw_hdr->arr.offset), (u8 *)bp->arr, len); \
	} while (0)
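
/* Hand-expanded sketch of one invocation, assuming the macro as
 * written: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) allocates be32_to_cpu(fw_hdr->init_data.len) bytes
 * into bp->init_data, jumps to request_firmware_exit if kmalloc()
 * fails, and otherwise byte-swaps the init_data section of
 * bp->firmware->data into the new buffer via be32_to_cpu_n().
 */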

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
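
/* The labels above unwind in reverse order of allocation: a failure in
 * the init_ops_offsets step frees init_ops and then init_data before
 * release_firmware(), so nothing allocated here leaks on any path.
 */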

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
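
/* Unlike a normal bnx2x_nic_unload(), the EEH variant above skips the
 * firmware handshake (halt ramrods, statistics stop) and simply frees
 * host-side resources, since the device may be unreachable after a bus
 * error.
 */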

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
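
/* Recovery sequence for the handlers above (the standard PCI AER/EEH
 * flow): the core invokes .error_detected first; returning
 * PCI_ERS_RESULT_NEED_RESET requests a slot reset, after which
 * .slot_reset re-enables and restores the device and .resume reattaches
 * the netdev and reloads the NIC.
 */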

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);