/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h> /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

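/* companion to bnx2x_reg_wr_ind(): read a GRC register indirectly through
 * the PCI configuration window; used only at init, before DMAE is ready
 */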
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

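/* copy len32 dwords from host memory at dma_addr to GRC address dst_addr
 * using the per-function DMAE channel, then busy-wait on the write-back
 * completion word; falls back to indirect writes while DMAE is not ready
 */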
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

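/* counterpart of bnx2x_write_dmae(): copy len32 dwords from GRC address
 * src_addr into the slowpath wb_data buffer; uses indirect register reads
 * while DMAE is not yet ready
 */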
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

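/* a single DMAE write is limited to DMAE_LEN32_WR_MAX dwords, so split
 * longer copies into maximal chunks and finish with the remainder
 */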
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

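/* scan the assert lists of the XSTORM, TSTORM, CSTORM and USTORM
 * processors and print every valid entry; returns the number of asserts
 * found
 */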
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

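/* print the bootcode (MCP) trace: read the last mark from the scratchpad
 * and dump the cyclic buffer, first from the mark to the end of the
 * buffer and then from the buffer start back up to the mark
 */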
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

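/* emergency state dump: freeze statistics, print the status block
 * indices and per-queue Rx/Tx ring state, dump the rings around the
 * current consumers, then the FW trace and the storm asserts
 */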
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

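/* program HC_CONFIG for the active interrupt mode (MSI-X, MSI or INTx)
 * and, on E1H, set up the leading/trailing edge attention registers for
 * this port
 */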
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

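/* mask interrupts at the driver level (and optionally in HW) and wait
 * until every fastpath ISR and the slowpath task have finished
 */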
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

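/* latch the CSTORM (Tx) and USTORM (Rx) indices from the fastpath status
 * block; the return value has bit 0 set if the CSTORM index changed and
 * bit 1 set if the USTORM index changed
 */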
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

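/* number of available Tx BDs; the NUM_TX_RINGS "next page" entries are
 * counted as used, so the result is a safe lower bound
 */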
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

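/* Tx completion path: walk the chain from the software consumer to the
 * hardware consumer reported in the status block, free each sent packet
 * and wake the Tx queue if it was stopped and enough BDs became free
 */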
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


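/* handle a ramrod (slowpath command) completion CQE: advance either the
 * per-queue state machine (client setup/halt) or the global device state
 * machine (port setup, halt, CFC delete, set MAC) accordingly
 */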
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

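/* allocate a compound page for an SGE ring entry, map it for DMA and
 * publish the mapping in both the software and hardware rings
 */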
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

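/* after a TPA aggregation completes, mark the SGEs it consumed in the
 * mask and advance the SGE producer over every fully-consumed mask
 * element, clearing the "next page" bits on the way
 */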
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

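/* begin a TPA (LRO) aggregation: move the empty skb parked in the
 * per-queue bin onto the Rx BD ring and park the skb that just started
 * aggregating in the bin instead (its pages will be attached later)
 */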
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

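/* walk the SGL reported in the fast path CQE and attach the SGE pages to
 * the aggregated skb as page fragments, replenishing each consumed SGE
 * ring entry on the way; sets gso_size to enable forwarding support
 */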
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

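/* end of a TPA aggregation: allocate a replacement skb for the bin, fix
 * the IP checksum of the aggregated packet, attach the SGL pages as
 * fragments and hand the skb to the stack; if the replacement allocation
 * fails the aggregated packet is dropped instead
 */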
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

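/* Rx completion loop: process up to 'budget' CQEs from the RCQ --
 * slowpath events are dispatched to bnx2x_sp_event(), TPA start/stop
 * CQEs to the TPA handlers, and regular packets are passed up the stack
 * (small packets are copied into a fresh skb first)
 */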
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

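/* MSI-X fastpath ISR: each vector serves either an Rx or a Tx queue;
 * Rx vectors just schedule NAPI, Tx vectors run the Tx completion inline
 * and then re-enable the interrupt through the IGU
 */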
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5 ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
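
/*
 * Illustrative sketch (not compiled, not part of the driver): the typical
 * pairing of the two helpers above.  Any path that touches a resource
 * shared between PCI functions brackets the access with acquire/release;
 * the resource ID used here (HW_LOCK_RESOURCE_SPIO) is an example only.
 */
#if 0
static int bnx2x_example_locked_access(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	if (rc)		/* -EINVAL, -EEXIST or -EAGAIN (timeout) */
		return rc;

	/* ... access the shared resource here ... */

	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
}
#endif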

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
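
/*
 * Illustrative sketch (not compiled, not part of the driver): reading a
 * pin with the helper above.  The GPIO number is an example value only.
 */
#if 0
	if (bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_2, BP_PORT(bp)) == 1)
		DP(NETIF_MSG_LINK, "GPIO 2 is high\n");
#endif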

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
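
/*
 * Illustrative sketch (not compiled, not part of the driver): driving a
 * pin with the helper above, much as the fan-failure handler later in
 * this file does for the SFX7101 PHY.  Pin and port are example values.
 */
#if 0
	/* force the pin low ... */
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	/* ... and later release it back to high-Z (input) */
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
#endif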

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
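
/*
 * Worked example (illustrative only): on a 10G link, r_param is
 * 10000/8 = 1250 bytes/usec, so the fairness timer resolution is
 * QM_ARB_BYTES / 1250 usec, and t_fair = T_FAIR_COEF / 10000 - i.e.
 * 1000 usec at 10G and 10000 usec at 1G, matching the comment above.
 */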

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
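
/*
 * Illustrative sketch (not compiled, not part of the driver): a typical
 * mailbox exchange using the helper above.  The command code is an
 * example; a return value of 0 means the MCP never replied.
 */
#if 0
	u32 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);

	if (!fw_resp)
		BNX2X_ERR("MCP did not respond to the command\n");
#endif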

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_eth_mac_addr_e1h(bp, 0);

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_eth_mac_addr_e1h(bp, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->state = BNX2X_STATE_OPEN;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
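
/*
 * Illustrative sketch (not compiled, not part of the driver): posting a
 * slowpath element with the helper above, as bnx2x_storm_stats_post()
 * later in this file does for the stats-query ramrod.  Command, CID and
 * data words are example values.
 */
#if 0
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			   data_hi, data_lo, 0);
	if (rc)		/* -EIO on panic, -EBUSY if the SPQ is full */
		return rc;
#endif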

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
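
/*
 * Illustrative note (not part of the driver): the return value is a
 * bitmask - bit 0 for attentions, bits 1-4 for the C/U/X/T default
 * status block indices - which is why bnx2x_sp_task() below tests
 * (status & 0x1) before handling HW attentions.
 */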

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shut down the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
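
/*
 * Worked example (illustrative only): with s_hi = 0, s_lo = 0xfffffffe
 * and a_hi = 0, a_lo = 3, ADD_64() leaves s_lo = 1 (32-bit wrap-around)
 * and, because the (s_lo < a_lo) test detects the wrap, carries 1 into
 * s_hi - giving the correct 64-bit sum 0x1:00000001.
 */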

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
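
/*
 * Illustrative note (not part of the driver): on a 64-bit kernel the
 * helper folds an adjacent hi/lo counter pair into one value via
 * HILO_U64(); on 32-bit it deliberately returns only the low word,
 * since a long cannot hold the full 64-bit counter.
 */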

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
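
/*
 * The port statistics block is larger than a single DMAE read allows,
 * so it is fetched above in two commands: DMAE_LEN32_RD_MAX dwords
 * first, then the remainder of struct host_port_stats.
 */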

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
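
/*
 * Unlike bnx2x_stats_pmf_update(), the function above only builds the
 * DMAE chain; it is actually executed later, when bnx2x_stats_start()
 * calls bnx2x_hw_stats_post().
 */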

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
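
/*
 * A non-PMF function only writes its own host_func_stats block, so a
 * single command in bp->stats_dmae suffices here (bp->executer_idx
 * stays 0) and bnx2x_hw_stats_post() posts it directly rather than
 * through the loader.
 */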

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
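
/*
 * The BMAC exposes a single pause counter pair (grxpf/gtxpf), so each
 * hardware counter is folded into two software counters above, and the
 * pause frame totals are then taken from the mac_stx[1] (current) copy.
 */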

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
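
/*
 * The EMAC, unlike the BMAC, counts XON and XOFF pause frames
 * separately, so the totals above are formed as xon + xoff with
 * 64-bit ADD_64 arithmetic.
 */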

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
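
/*
 * Consistency of a snapshot is checked by comparing the counter each
 * storm echoes back against bp->stats_counter; any mismatch abandons
 * the whole update with a nonzero return, and bnx2x_stats_update()
 * panics only after several consecutive failures.
 */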

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
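
/*
 * Example walk through the table: starting DISABLED, a LINK_UP event
 * runs bnx2x_stats_start() and moves to ENABLED; subsequent UPDATE
 * events run bnx2x_stats_update() and stay ENABLED, and a STOP event
 * runs bnx2x_stats_stop() and returns to DISABLED.
 */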

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
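
/*
 * The func = 2*vn + port mapping reflects the E1H function layout:
 * PCI functions are interleaved across the two ports, so even-numbered
 * functions belong to port 0 and odd-numbered ones to port 1.
 */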

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

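/*
 * Driver/MCP heartbeat: each timer tick the driver advances its pulse
 * sequence number and writes it to shared memory, while the management
 * firmware echoes what it has seen.  A healthy pair differs by at most
 * one, e.g. drv_pulse 0x12 with mcp_pulse 0x11 or 0x12; anything else
 * means one side missed a beat and is logged below.
 */
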
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
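
/*
 * Note that both the "U" and the "C" halves of a status block are
 * programmed through BAR_CSTRORM_INTMEM: with this firmware the CSTORM
 * hosts both sections, which is also why bnx2x_zero_sb() clears them
 * through CSEM fast memory.
 */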

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
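
/*
 * The /12 above presumably converts the user-visible coalescing values
 * (bp->rx_ticks/bp->tx_ticks, in microseconds) into the host
 * coalescing block's timeout units; whenever the result is 0 (ticks
 * below 12 us) the corresponding HC_DISABLE flag is set instead.
 */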

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4924
a2fbb9ea
ET
4925static void bnx2x_init_rx_rings(struct bnx2x *bp)
4926{
7a9b2557 4927 int func = BP_FUNC(bp);
32626230
EG
4928 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4929 ETH_MAX_AGGREGATION_QUEUES_E1H;
4930 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4931 int i, j;
a2fbb9ea 4932
87942b46 4933 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4934 DP(NETIF_MSG_IFUP,
4935 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4936
7a9b2557 4937 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4938
555f6c78 4939 for_each_rx_queue(bp, j) {
32626230 4940 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4941
32626230 4942 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4943 fp->tpa_pool[i].skb =
4944 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4945 if (!fp->tpa_pool[i].skb) {
4946 BNX2X_ERR("Failed to allocate TPA "
4947 "skb pool for queue[%d] - "
4948 "disabling TPA on this "
4949 "queue!\n", j);
4950 bnx2x_free_tpa_pool(bp, fp, i);
4951 fp->disable_tpa = 1;
4952 break;
4953 }
4954 pci_unmap_addr_set((struct sw_rx_bd *)
4955 &bp->fp->tpa_pool[i],
4956 mapping, 0);
4957 fp->tpa_state[i] = BNX2X_TPA_STOP;
4958 }
4959 }
4960 }
4961
555f6c78 4962 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4963 struct bnx2x_fastpath *fp = &bp->fp[j];
4964
4965 fp->rx_bd_cons = 0;
4966 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4967 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4968
ca00392c
EG
4969 /* Mark queue as Rx */
4970 fp->is_rx_queue = 1;
4971
7a9b2557
VZ
4972 /* "next page" elements initialization */
4973 /* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

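		/* The USTORM memory workaround address is programmed only
		 * once, from the leading queue's completion ring
		 * (assumption: the FW consults a single per-function
		 * address for this workaround).
		 */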
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

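	/* The slow path queue (SPQ) is a single page of BDs used as a
	 * circular buffer for ramrod commands; its base address and
	 * initial producer index are advertised to the XSTORM below.
	 */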
	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

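			/* max_sges_for_packet is first computed in pages
			 * needed for an MTU-sized frame, then rounded up
			 * to a whole multiple of PAGES_PER_SGE and
			 * converted to SGE entries.
			 */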
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

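	/* The 8-byte client config structure is pushed to TSTORM internal
	 * memory as two 32-bit writes per client.
	 */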
	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

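	/* 'mask' is a per-client bitmask selecting which client(s) each
	 * accept/drop filter below applies to (see bp->rx_mode_cl_mask).
	 */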
	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
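	/* The value is additionally clamped to 0xffff since the storm
	 * field holding it (written with REG_WR16 below) is only 16 bits
	 * wide.
	 */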
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
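	/* The low/high watermarks below are in ring-entry units; crossing
	 * the low watermark triggers pause towards the peer and crossing
	 * the high watermark releases it (an assumption based on the
	 * dropless flow control scheme this block configures).
	 */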
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
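	/* The cases deliberately fall through: a COMMON load also performs
	 * the PORT and FUNCTION init, and a PORT load also performs the
	 * FUNCTION init - the MCP hands the first driver instance on the
	 * chip/port the wider load_code.
	 */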
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
		AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

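	/* A gzip member starts with a fixed 10-byte header (RFC 1952).
	 * If the FNAME flag is set in the FLG byte (zbuf[3]), a
	 * NUL-terminated original file name follows and is skipped below;
	 * the raw deflate stream is then inflated with -MAX_WBITS, i.e.
	 * without expecting a zlib wrapper.
	 */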
#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver, so to test them we send debug packets through the chip and
 * check the packet counters along their path.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

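	/* Emulation and FPGA platforms run much slower than real silicon,
	 * so all polling timeouts below are scaled by 'factor'.
	 */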
	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0;	/* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
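	/* Writing 0 to a block's INT_MASK register unmasks all of its
	 * attention sources; set bits keep the corresponding sources
	 * masked (as in the PBF write at the end of this function).
	 */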
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

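	/* The orders are taken from the PCIe Device Control register:
	 * Max_Payload_Size is encoded in bits 7:5 and
	 * Max_Read_Request_Size in bits 14:12, each as a power of two
	 * times 128 bytes.
	 */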
	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY.
	 * Currently, a fan is required for most designs with the SFX7101,
	 * BCM8727 and BCM8481 PHYs.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
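	/* The packed value programs, among other parameters, a CDU context
	 * line size of 1024 bytes, matching the cdu_context size checked
	 * above (the exact bit-field layout is assumed from the shifts
	 * used here).
	 */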

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
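				/* i.e. ceil((24*1024 + val*4)/256):
				 * 96 is 24*1024/256, val/64 is val*4/256,
				 * and the remainder is rounded up */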
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

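	/* The PBF threshold and credit values below are in 16-byte units;
	 * 9040 covers a 9000-byte jumbo frame plus overhead (an assumption
	 * based on the /16 scaling and the comment above).
	 */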
	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and a 'valid' bit is set
   at bit 52 (the 53rd bit); since this is a wide register(TM)
   it is split into two 32-bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
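/* PXP_ILT_RANGE packs a [first, last] pair of ILT line numbers into one
 * register value (last in the upper 10 bits); PXP_ONE_ILT is the
 * degenerate single-line range.
 */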

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

6612static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6613{
6614 int i, rc = 0;
a2fbb9ea 6615
6616 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6617 BP_FUNC(bp), load_code);
a2fbb9ea 6618
6619 bp->dmae_ready = 0;
6620 mutex_init(&bp->dmae_mutex);
6621 rc = bnx2x_gunzip_init(bp);
6622 if (rc)
6623 return rc;
a2fbb9ea 6624
6625 switch (load_code) {
6626 case FW_MSG_CODE_DRV_LOAD_COMMON:
6627 rc = bnx2x_init_common(bp);
6628 if (rc)
6629 goto init_hw_err;
6630 /* no break */
6631
6632 case FW_MSG_CODE_DRV_LOAD_PORT:
6633 bp->dmae_ready = 1;
6634 rc = bnx2x_init_port(bp);
6635 if (rc)
6636 goto init_hw_err;
6637 /* no break */
6638
6639 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6640 bp->dmae_ready = 1;
6641 rc = bnx2x_init_func(bp);
6642 if (rc)
6643 goto init_hw_err;
6644 break;
6645
6646 default:
6647 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6648 break;
6649 }
6650
6651 if (!BP_NOMCP(bp)) {
6652 int func = BP_FUNC(bp);
6653
6654 bp->fw_drv_pulse_wr_seq =
34f80b04 6655 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6656 DRV_PULSE_SEQ_MASK);
6657 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6658 }
a2fbb9ea 6659
6660 /* this needs to be done before gunzip end */
6661 bnx2x_zero_def_sb(bp);
6662 for_each_queue(bp, i)
6663 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6664#ifdef BCM_CNIC
6665 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6666#endif
6667
6668init_hw_err:
6669 bnx2x_gunzip_end(bp);
6670
6671 return rc;
6672}
6673
6674static void bnx2x_free_mem(struct bnx2x *bp)
6675{
6676
6677#define BNX2X_PCI_FREE(x, y, size) \
6678 do { \
6679 if (x) { \
6680 pci_free_consistent(bp->pdev, size, x, y); \
6681 x = NULL; \
6682 y = 0; \
6683 } \
6684 } while (0)
6685
6686#define BNX2X_FREE(x) \
6687 do { \
6688 if (x) { \
6689 vfree(x); \
6690 x = NULL; \
6691 } \
6692 } while (0)
6693
6694 int i;
6695
6696 /* fastpath */
555f6c78 6697 /* Common */
6698 for_each_queue(bp, i) {
6699
555f6c78 6700 /* status blocks */
6701 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6702 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6703 sizeof(struct host_status_block));
6704 }
6705 /* Rx */
6706 for_each_rx_queue(bp, i) {
a2fbb9ea 6707
555f6c78 6708 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6709 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6710 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6711 bnx2x_fp(bp, i, rx_desc_mapping),
6712 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6713
6714 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6715 bnx2x_fp(bp, i, rx_comp_mapping),
6716 sizeof(struct eth_fast_path_rx_cqe) *
6717 NUM_RCQ_BD);
a2fbb9ea 6718
7a9b2557 6719 /* SGE ring */
32626230 6720 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6721 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6722 bnx2x_fp(bp, i, rx_sge_mapping),
6723 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6724 }
6725 /* Tx */
6726 for_each_tx_queue(bp, i) {
6727
6728 /* fastpath tx rings: tx_buf tx_desc */
6729 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6730 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6731 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6732 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6733 }
6734 /* end of fastpath */
6735
6736 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6737 sizeof(struct host_def_status_block));
6738
6739 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6740 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6741
37b091ba 6742#ifdef BCM_CNIC
6743 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6744 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6745 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6746 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6747 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6748 sizeof(struct host_status_block));
a2fbb9ea 6749#endif
7a9b2557 6750 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6751
6752#undef BNX2X_PCI_FREE
6753#undef BNX2X_FREE
6754}
6755
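/* Allocate all driver memory: per-queue status blocks and Rx/Tx/SGE
 * rings, the default status block, the slowpath area, the slowpath
 * (SPQ) ring and, under BCM_CNIC, the searcher T1/T2, timers and QM
 * tables. On any failure everything is freed and -ENOMEM is returned.
 */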
6756static int bnx2x_alloc_mem(struct bnx2x *bp)
6757{
6758
6759#define BNX2X_PCI_ALLOC(x, y, size) \
6760 do { \
6761 x = pci_alloc_consistent(bp->pdev, size, y); \
6762 if (x == NULL) \
6763 goto alloc_mem_err; \
6764 memset(x, 0, size); \
6765 } while (0)
6766
6767#define BNX2X_ALLOC(x, size) \
6768 do { \
6769 x = vmalloc(size); \
6770 if (x == NULL) \
6771 goto alloc_mem_err; \
6772 memset(x, 0, size); \
6773 } while (0)
6774
6775 int i;
6776
6777 /* fastpath */
555f6c78 6778 /* Common */
6779 for_each_queue(bp, i) {
6780 bnx2x_fp(bp, i, bp) = bp;
6781
555f6c78 6782 /* status blocks */
6783 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6784 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6785 sizeof(struct host_status_block));
6786 }
6787 /* Rx */
6788 for_each_rx_queue(bp, i) {
a2fbb9ea 6789
555f6c78 6790 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6791 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6792 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6793 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6794 &bnx2x_fp(bp, i, rx_desc_mapping),
6795 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6796
6797 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6798 &bnx2x_fp(bp, i, rx_comp_mapping),
6799 sizeof(struct eth_fast_path_rx_cqe) *
6800 NUM_RCQ_BD);
6801
6802 /* SGE ring */
6803 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6804 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6805 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6806 &bnx2x_fp(bp, i, rx_sge_mapping),
6807 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6808 }
6809 /* Tx */
6810 for_each_tx_queue(bp, i) {
6811
6812 /* fastpath tx rings: tx_buf tx_desc */
6813 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6814 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6815 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6816 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6817 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6818 }
6819 /* end of fastpath */
6820
6821 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6822 sizeof(struct host_def_status_block));
6823
6824 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6825 sizeof(struct bnx2x_slowpath));
6826
37b091ba 6827#ifdef BCM_CNIC
6828 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6829
6830 /* allocate the searcher T2 table;
6831 we allocate 1/4 of the T1 allocation for T2
6832 (which is not entered into the ILT) */
6833 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6834
37b091ba 6835 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 6836 for (i = 0; i < 16*1024; i += 64)
37b091ba 6837 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 6838
37b091ba 6839 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6840 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6841
6842 /* QM queues (128*MAX_CONN) */
6843 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6844
6845 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6846 sizeof(struct host_status_block));
6847#endif
6848
6849 /* Slow path ring */
6850 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6851
6852 return 0;
6853
6854alloc_mem_err:
6855 bnx2x_free_mem(bp);
6856 return -ENOMEM;
6857
6858#undef BNX2X_PCI_ALLOC
6859#undef BNX2X_ALLOC
6860}
6861
6862static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6863{
6864 int i;
6865
555f6c78 6866 for_each_tx_queue(bp, i) {
6867 struct bnx2x_fastpath *fp = &bp->fp[i];
6868
6869 u16 bd_cons = fp->tx_bd_cons;
6870 u16 sw_prod = fp->tx_pkt_prod;
6871 u16 sw_cons = fp->tx_pkt_cons;
6872
6873 while (sw_cons != sw_prod) {
6874 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6875 sw_cons++;
6876 }
6877 }
6878}
6879
6880static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6881{
6882 int i, j;
6883
555f6c78 6884 for_each_rx_queue(bp, j) {
6885 struct bnx2x_fastpath *fp = &bp->fp[j];
6886
6887 for (i = 0; i < NUM_RX_BD; i++) {
6888 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6889 struct sk_buff *skb = rx_buf->skb;
6890
6891 if (skb == NULL)
6892 continue;
6893
6894 pci_unmap_single(bp->pdev,
6895 pci_unmap_addr(rx_buf, mapping),
356e2385 6896 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6897
6898 rx_buf->skb = NULL;
6899 dev_kfree_skb(skb);
6900 }
7a9b2557 6901 if (!fp->disable_tpa)
6902 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6903 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6904 ETH_MAX_AGGREGATION_QUEUES_E1H);
6905 }
6906}
6907
6908static void bnx2x_free_skbs(struct bnx2x *bp)
6909{
6910 bnx2x_free_tx_skbs(bp);
6911 bnx2x_free_rx_skbs(bp);
6912}
6913
6914static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6915{
34f80b04 6916 int i, offset = 1;
6917
6918 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6919 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6920 bp->msix_table[0].vector);
6921
6922#ifdef BCM_CNIC
6923 offset++;
6924#endif
a2fbb9ea 6925 for_each_queue(bp, i) {
c14423fe 6926 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6927 "state %x\n", i, bp->msix_table[i + offset].vector,
6928 bnx2x_fp(bp, i, state));
6929
34f80b04 6930 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6931 }
6932}
6933
6934static void bnx2x_free_irq(struct bnx2x *bp)
6935{
a2fbb9ea 6936 if (bp->flags & USING_MSIX_FLAG) {
6937 bnx2x_free_msix_irqs(bp);
6938 pci_disable_msix(bp->pdev);
6939 bp->flags &= ~USING_MSIX_FLAG;
6940
6941 } else if (bp->flags & USING_MSI_FLAG) {
6942 free_irq(bp->pdev->irq, bp->dev);
6943 pci_disable_msi(bp->pdev);
6944 bp->flags &= ~USING_MSI_FLAG;
6945
6946 } else
6947 free_irq(bp->pdev->irq, bp->dev);
6948}
6949
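/* MSI-X vector layout: entry 0 is the slowpath interrupt, entry 1 is
 * the CNIC vector when BCM_CNIC is defined, and the remaining entries
 * map one vector per fastpath queue.
 */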
6950static int bnx2x_enable_msix(struct bnx2x *bp)
6951{
6952 int i, rc, offset = 1;
6953 int igu_vec = 0;
a2fbb9ea 6954
6955 bp->msix_table[0].entry = igu_vec;
6956 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6957
6958#ifdef BCM_CNIC
6959 igu_vec = BP_L_ID(bp) + offset;
6960 bp->msix_table[1].entry = igu_vec;
6961 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6962 offset++;
6963#endif
34f80b04 6964 for_each_queue(bp, i) {
8badd27a 6965 igu_vec = BP_L_ID(bp) + offset + i;
6966 bp->msix_table[i + offset].entry = igu_vec;
6967 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6968 "(fastpath #%u)\n", i + offset, igu_vec, i);
6969 }
6970
34f80b04 6971 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6972 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6973 if (rc) {
6974 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6975 return rc;
34f80b04 6976 }
8badd27a 6977
6978 bp->flags |= USING_MSIX_FLAG;
6979
6980 return 0;
6981}
6982
6983static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6984{
34f80b04 6985 int i, rc, offset = 1;
a2fbb9ea 6986
6987 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6988 bp->dev->name, bp->dev);
6989 if (rc) {
6990 BNX2X_ERR("request sp irq failed\n");
6991 return -EBUSY;
6992 }
6993
6994#ifdef BCM_CNIC
6995 offset++;
6996#endif
a2fbb9ea 6997 for_each_queue(bp, i) {
6998 struct bnx2x_fastpath *fp = &bp->fp[i];
6999
7000 if (i < bp->num_rx_queues)
7001 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7002 else
7003 sprintf(fp->name, "%s-tx-%d",
7004 bp->dev->name, i - bp->num_rx_queues);
7005
34f80b04 7006 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7007 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7008 if (rc) {
555f6c78 7009 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7010 bnx2x_free_msix_irqs(bp);
7011 return -EBUSY;
7012 }
7013
555f6c78 7014 fp->state = BNX2X_FP_STATE_IRQ;
7015 }
7016
555f6c78 7017 i = BNX2X_NUM_QUEUES(bp);
7018 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7019 " ... fp[%d] %d\n",
7020 bp->dev->name, bp->msix_table[0].vector,
7021 0, bp->msix_table[offset].vector,
7022 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7023
a2fbb9ea 7024 return 0;
7025}
7026
7027static int bnx2x_enable_msi(struct bnx2x *bp)
7028{
7029 int rc;
7030
7031 rc = pci_enable_msi(bp->pdev);
7032 if (rc) {
7033 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7034 return -1;
7035 }
7036 bp->flags |= USING_MSI_FLAG;
7037
7038 return 0;
7039}
7040
7041static int bnx2x_req_irq(struct bnx2x *bp)
7042{
8badd27a 7043 unsigned long flags;
34f80b04 7044 int rc;
a2fbb9ea 7045
7046 if (bp->flags & USING_MSI_FLAG)
7047 flags = 0;
7048 else
7049 flags = IRQF_SHARED;
7050
7051 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7052 bp->dev->name, bp->dev);
7053 if (!rc)
7054 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7055
7056 return rc;
7057}
7058
7059static void bnx2x_napi_enable(struct bnx2x *bp)
7060{
7061 int i;
7062
555f6c78 7063 for_each_rx_queue(bp, i)
7064 napi_enable(&bnx2x_fp(bp, i, napi));
7065}
7066
7067static void bnx2x_napi_disable(struct bnx2x *bp)
7068{
7069 int i;
7070
555f6c78 7071 for_each_rx_queue(bp, i)
7072 napi_disable(&bnx2x_fp(bp, i, napi));
7073}
7074
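/* Re-enable NAPI, HW interrupts and the Tx queues, but only when this
 * call balances the last bnx2x_netif_stop() (intr_sem drops to 0).
 */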
7075static void bnx2x_netif_start(struct bnx2x *bp)
7076{
7077 int intr_sem;
7078
7079 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7080 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7081
7082 if (intr_sem) {
65abd74d 7083 if (netif_running(bp->dev)) {
7084 bnx2x_napi_enable(bp);
7085 bnx2x_int_enable(bp);
7086 if (bp->state == BNX2X_STATE_OPEN)
7087 netif_tx_wake_all_queues(bp->dev);
7088 }
7089 }
7090}
7091
f8ef6e44 7092static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7093{
f8ef6e44 7094 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7095 bnx2x_napi_disable(bp);
7096 netif_tx_disable(bp->dev);
7097 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7098}
7099
7100/*
7101 * Init service functions
7102 */
7103
7104/**
7105 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7106 *
7107 * @param bp driver descriptor
7108 * @param set set or clear an entry (1 or 0)
7109 * @param mac pointer to a buffer containing a MAC
7110 * @param cl_bit_vec bit vector of clients to register a MAC for
7111 * @param cam_offset offset in a CAM to use
7112 * @param with_bcast set broadcast MAC as well
7113 */
7114static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7115 u32 cl_bit_vec, u8 cam_offset,
7116 u8 with_bcast)
7117{
7118 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7119 int port = BP_PORT(bp);
7120
7121 /* CAM allocation
7122 * unicasts 0-31:port0 32-63:port1
7123 * multicast 64-127:port0 128-191:port1
7124 */
7125 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7126 config->hdr.offset = cam_offset;
7127 config->hdr.client_id = 0xff;
7128 config->hdr.reserved1 = 0;
7129
7130 /* primary MAC */
7131 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7132 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7133 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7134 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7135 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7136 swab16(*(u16 *)&mac[4]);
34f80b04 7137 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7138 if (set)
7139 config->config_table[0].target_table_entry.flags = 0;
7140 else
7141 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7142 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7143 cpu_to_le32(cl_bit_vec);
7144 config->config_table[0].target_table_entry.vlan_id = 0;
7145
7146 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7147 (set ? "setting" : "clearing"),
7148 config->config_table[0].cam_entry.msb_mac_addr,
7149 config->config_table[0].cam_entry.middle_mac_addr,
7150 config->config_table[0].cam_entry.lsb_mac_addr);
7151
7152 /* broadcast */
7153 if (with_bcast) {
7154 config->config_table[1].cam_entry.msb_mac_addr =
7155 cpu_to_le16(0xffff);
7156 config->config_table[1].cam_entry.middle_mac_addr =
7157 cpu_to_le16(0xffff);
7158 config->config_table[1].cam_entry.lsb_mac_addr =
7159 cpu_to_le16(0xffff);
7160 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7161 if (set)
7162 config->config_table[1].target_table_entry.flags =
7163 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7164 else
7165 CAM_INVALIDATE(config->config_table[1]);
7166 config->config_table[1].target_table_entry.clients_bit_vector =
7167 cpu_to_le32(cl_bit_vec);
7168 config->config_table[1].target_table_entry.vlan_id = 0;
7169 }
7170
7171 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7172 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7173 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7174}
7175
7176/**
7177 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7178 *
7179 * @param bp driver descriptor
7180 * @param set set or clear an entry (1 or 0)
7181 * @param mac pointer to a buffer containing a MAC
7182 * @param cl_bit_vec bit vector of clients to register a MAC for
7183 * @param cam_offset offset in a CAM to use
7184 */
7185static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7186 u32 cl_bit_vec, u8 cam_offset)
7187{
7188 struct mac_configuration_cmd_e1h *config =
7189 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7190
8d9c5f34 7191 config->hdr.length = 1;
7192 config->hdr.offset = cam_offset;
7193 config->hdr.client_id = 0xff;
7194 config->hdr.reserved1 = 0;
7195
7196 /* primary MAC */
7197 config->config_table[0].msb_mac_addr =
e665bfda 7198 swab16(*(u16 *)&mac[0]);
34f80b04 7199 config->config_table[0].middle_mac_addr =
e665bfda 7200 swab16(*(u16 *)&mac[2]);
34f80b04 7201 config->config_table[0].lsb_mac_addr =
e665bfda 7202 swab16(*(u16 *)&mac[4]);
ca00392c 7203 config->config_table[0].clients_bit_vector =
e665bfda 7204 cpu_to_le32(cl_bit_vec);
7205 config->config_table[0].vlan_id = 0;
7206 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7207 if (set)
7208 config->config_table[0].flags = BP_PORT(bp);
7209 else
7210 config->config_table[0].flags =
7211 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7212
e665bfda 7213 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7214 (set ? "setting" : "clearing"),
7215 config->config_table[0].msb_mac_addr,
7216 config->config_table[0].middle_mac_addr,
e665bfda 7217 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7218
7219 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7220 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7221 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7222}
7223
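/* Poll for a ramrod completion: wait for *state_p to reach the
 * expected state or the retry budget to run out (~5 seconds of
 * msleep(1) iterations); with poll set, drive the Rx completion ring
 * directly instead of relying on interrupts.
 */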
7224static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7225 int *state_p, int poll)
7226{
7227 /* can take a while if any port is running */
8b3a0f0b 7228 int cnt = 5000;
a2fbb9ea 7229
7230 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7231 poll ? "polling" : "waiting", state, idx);
7232
7233 might_sleep();
34f80b04 7234 while (cnt--) {
7235 if (poll) {
7236 bnx2x_rx_int(bp->fp, 10);
7237 /* if index is different from 0
7238 * the reply for some commands will
3101c2bc 7239 * be on the non default queue
7240 */
7241 if (idx)
7242 bnx2x_rx_int(&bp->fp[idx], 10);
7243 }
a2fbb9ea 7244
3101c2bc 7245 mb(); /* state is changed by bnx2x_sp_event() */
7246 if (*state_p == state) {
7247#ifdef BNX2X_STOP_ON_ERROR
7248 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7249#endif
a2fbb9ea 7250 return 0;
8b3a0f0b 7251 }
a2fbb9ea 7252
a2fbb9ea 7253 msleep(1);
7254
7255 if (bp->panic)
7256 return -EIO;
7257 }
7258
a2fbb9ea 7259 /* timeout! */
7260 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7261 poll ? "polling" : "waiting", state, idx);
7262#ifdef BNX2X_STOP_ON_ERROR
7263 bnx2x_panic();
7264#endif
a2fbb9ea 7265
49d66772 7266 return -EBUSY;
7267}
7268
7269static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7270{
7271 bp->set_mac_pending++;
7272 smp_wmb();
7273
7274 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7275 (1 << bp->fp->cl_id), BP_FUNC(bp));
7276
7277 /* Wait for a completion */
7278 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7279}
7280
7281static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7282{
7283 bp->set_mac_pending++;
7284 smp_wmb();
7285
7286 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7287 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7288 1);
7289
7290 /* Wait for a completion */
7291 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7292}
7293
7294static int bnx2x_setup_leading(struct bnx2x *bp)
7295{
34f80b04 7296 int rc;
a2fbb9ea 7297
c14423fe 7298 /* reset IGU state */
34f80b04 7299 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7300
7301 /* SETUP ramrod */
7302 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7303
7304 /* Wait for completion */
7305 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7306
34f80b04 7307 return rc;
7308}
7309
7310static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7311{
7312 struct bnx2x_fastpath *fp = &bp->fp[index];
7313
a2fbb9ea 7314 /* reset IGU state */
555f6c78 7315 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7316
228241eb 7317 /* SETUP ramrod */
7318 fp->state = BNX2X_FP_STATE_OPENING;
7319 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7320 fp->cl_id, 0);
7321
7322 /* Wait for completion */
7323 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7324 &(fp->state), 0);
7325}
7326
a2fbb9ea 7327static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7328
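/* Derive the Rx/Tx queue counts for MSI-X from multi_mode and the
 * num_rx_queues/num_tx_queues module parameters, defaulting to the
 * number of online CPUs, capped at BNX2X_MAX_QUEUES(bp), and never
 * allowing more Tx queues than Rx queues.
 */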
7329static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7330 int *num_tx_queues_out)
7331{
7332 int _num_rx_queues = 0, _num_tx_queues = 0;
7333
7334 switch (bp->multi_mode) {
7335 case ETH_RSS_MODE_DISABLED:
7336 _num_rx_queues = 1;
7337 _num_tx_queues = 1;
7338 break;
7339
7340 case ETH_RSS_MODE_REGULAR:
7341 if (num_rx_queues)
7342 _num_rx_queues = min_t(u32, num_rx_queues,
7343 BNX2X_MAX_QUEUES(bp));
7344 else
7345 _num_rx_queues = min_t(u32, num_online_cpus(),
7346 BNX2X_MAX_QUEUES(bp));
7347
7348 if (num_tx_queues)
7349 _num_tx_queues = min_t(u32, num_tx_queues,
7350 BNX2X_MAX_QUEUES(bp));
7351 else
7352 _num_tx_queues = min_t(u32, num_online_cpus(),
7353 BNX2X_MAX_QUEUES(bp));
7354
7355 /* There must not be more Tx queues than Rx queues */
7356 if (_num_tx_queues > _num_rx_queues) {
7357 BNX2X_ERR("number of tx queues (%d) > "
7358 "number of rx queues (%d)"
7359 " defaulting to %d\n",
7360 _num_tx_queues, _num_rx_queues,
7361 _num_rx_queues);
7362 _num_tx_queues = _num_rx_queues;
7363 }
7364 break;
7365
7366
7367 default:
7368 _num_rx_queues = 1;
7369 _num_tx_queues = 1;
7370 break;
7371 }
7372
7373 *num_rx_queues_out = _num_rx_queues;
7374 *num_tx_queues_out = _num_tx_queues;
7375}
7376
7377static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7378{
ca00392c 7379 int rc = 0;
a2fbb9ea 7380
7381 switch (int_mode) {
7382 case INT_MODE_INTx:
7383 case INT_MODE_MSI:
7384 bp->num_rx_queues = 1;
7385 bp->num_tx_queues = 1;
7386 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7387 break;
7388
7389 case INT_MODE_MSIX:
7390 default:
7391 /* Set interrupt mode according to bp->multi_mode value */
7392 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7393 &bp->num_tx_queues);
7394
7395 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7396 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7397
7398 /* if we can't use MSI-X we only need one fp,
7399 * so try to enable MSI-X with the requested number of fp's
7400 * and fall back to MSI or legacy INTx with one fp
7401 */
7402 rc = bnx2x_enable_msix(bp);
7403 if (rc) {
34f80b04 7404 /* failed to enable MSI-X */
7405 if (bp->multi_mode)
7406 BNX2X_ERR("Multi requested but failed to "
7407 "enable MSI-X (rx %d tx %d), "
7408 "set number of queues to 1\n",
7409 bp->num_rx_queues, bp->num_tx_queues);
7410 bp->num_rx_queues = 1;
7411 bp->num_tx_queues = 1;
a2fbb9ea 7412 }
8badd27a 7413 break;
a2fbb9ea 7414 }
555f6c78 7415 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7416 return rc;
7417}
7418
7419
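/* Bring the NIC up: set the interrupt mode, allocate memory, request
 * IRQs, negotiate the load type with the MCP, init the HW and internal
 * state, open the client connections and finally start the fast path.
 */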
7420/* must be called with rtnl_lock */
7421static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7422{
7423 u32 load_code;
7424 int i, rc;
7425
8badd27a 7426#ifdef BNX2X_STOP_ON_ERROR
7427 if (unlikely(bp->panic))
7428 return -EPERM;
7429#endif
7430
7431 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7432
ca00392c 7433 rc = bnx2x_set_int_mode(bp);
c14423fe 7434
7435 if (bnx2x_alloc_mem(bp))
7436 return -ENOMEM;
7437
555f6c78 7438 for_each_rx_queue(bp, i)
7439 bnx2x_fp(bp, i, disable_tpa) =
7440 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7441
555f6c78 7442 for_each_rx_queue(bp, i)
7443 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7444 bnx2x_poll, 128);
7445
7446 bnx2x_napi_enable(bp);
7447
7448 if (bp->flags & USING_MSIX_FLAG) {
7449 rc = bnx2x_req_msix_irqs(bp);
7450 if (rc) {
7451 pci_disable_msix(bp->pdev);
2dfe0e1f 7452 goto load_error1;
7453 }
7454 } else {
7455 /* Fall back to INTx if MSI-X could not be enabled due to lack of
7456 memory (in bnx2x_set_int_mode()) */
7457 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7458 bnx2x_enable_msi(bp);
7459 bnx2x_ack_int(bp);
7460 rc = bnx2x_req_irq(bp);
7461 if (rc) {
2dfe0e1f 7462 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7463 if (bp->flags & USING_MSI_FLAG)
7464 pci_disable_msi(bp->pdev);
2dfe0e1f 7465 goto load_error1;
a2fbb9ea 7466 }
7467 if (bp->flags & USING_MSI_FLAG) {
7468 bp->dev->irq = bp->pdev->irq;
7469 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7470 bp->dev->name, bp->pdev->irq);
7471 }
7472 }
7473
7474 /* Send LOAD_REQUEST command to MCP.
7475 The response indicates the type of LOAD command:
7476 if this is the first port to be initialized,
7477 the common blocks should be initialized as well; otherwise not
7478 */
7479 if (!BP_NOMCP(bp)) {
7480 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7481 if (!load_code) {
7482 BNX2X_ERR("MCP response failure, aborting\n");
7483 rc = -EBUSY;
7484 goto load_error2;
7485 }
7486 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7487 rc = -EBUSY; /* other port in diagnostic mode */
7488 goto load_error2;
7489 }
7490
7491 } else {
7492 int port = BP_PORT(bp);
7493
f5372251 7494 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7495 load_count[0], load_count[1], load_count[2]);
7496 load_count[0]++;
7497 load_count[1 + port]++;
f5372251 7498 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7499 load_count[0], load_count[1], load_count[2]);
7500 if (load_count[0] == 1)
7501 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7502 else if (load_count[1 + port] == 1)
7503 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7504 else
7505 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7506 }
7507
7508 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7509 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7510 bp->port.pmf = 1;
7511 else
7512 bp->port.pmf = 0;
7513 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7514
a2fbb9ea 7515 /* Initialize HW */
7516 rc = bnx2x_init_hw(bp, load_code);
7517 if (rc) {
a2fbb9ea 7518 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7519 goto load_error2;
7520 }
7521
a2fbb9ea 7522 /* Setup NIC internals and enable interrupts */
471de716 7523 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7524
7525 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7526 (bp->common.shmem2_base))
7527 SHMEM2_WR(bp, dcc_support,
7528 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7529 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7530
a2fbb9ea 7531 /* Send LOAD_DONE command to MCP */
34f80b04 7532 if (!BP_NOMCP(bp)) {
7533 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7534 if (!load_code) {
da5a662a 7535 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7536 rc = -EBUSY;
2dfe0e1f 7537 goto load_error3;
7538 }
7539 }
7540
7541 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7542
7543 rc = bnx2x_setup_leading(bp);
7544 if (rc) {
da5a662a 7545 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7546#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7547 goto load_error3;
7548#else
7549 bp->panic = 1;
7550 return -EBUSY;
7551#endif
34f80b04 7552 }
a2fbb9ea 7553
7554 if (CHIP_IS_E1H(bp))
7555 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7556 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7557 bp->state = BNX2X_STATE_DISABLED;
7558 }
a2fbb9ea 7559
ca00392c 7560 if (bp->state == BNX2X_STATE_OPEN) {
7561#ifdef BCM_CNIC
7562 /* Enable Timer scan */
7563 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7564#endif
7565 for_each_nondefault_queue(bp, i) {
7566 rc = bnx2x_setup_multi(bp, i);
7567 if (rc)
7568#ifdef BCM_CNIC
7569 goto load_error4;
7570#else
2dfe0e1f 7571 goto load_error3;
37b091ba 7572#endif
34f80b04 7573 }
a2fbb9ea 7574
ca00392c 7575 if (CHIP_IS_E1(bp))
e665bfda 7576 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7577 else
e665bfda 7578 bnx2x_set_eth_mac_addr_e1h(bp, 1);
ca00392c 7579 }
7580
7581 if (bp->port.pmf)
b5bf9068 7582 bnx2x_initial_phy_init(bp, load_mode);
7583
7584 /* Start fast path */
7585 switch (load_mode) {
7586 case LOAD_NORMAL:
7587 if (bp->state == BNX2X_STATE_OPEN) {
7588 /* Tx queues should only be re-enabled */
7589 netif_tx_wake_all_queues(bp->dev);
7590 }
2dfe0e1f 7591 /* Initialize the receive filter. */
7592 bnx2x_set_rx_mode(bp->dev);
7593 break;
7594
7595 case LOAD_OPEN:
555f6c78 7596 netif_tx_start_all_queues(bp->dev);
7597 if (bp->state != BNX2X_STATE_OPEN)
7598 netif_tx_disable(bp->dev);
2dfe0e1f 7599 /* Initialize the receive filter. */
34f80b04 7600 bnx2x_set_rx_mode(bp->dev);
34f80b04 7601 break;
a2fbb9ea 7602
34f80b04 7603 case LOAD_DIAG:
2dfe0e1f 7604 /* Initialize the receive filter. */
a2fbb9ea 7605 bnx2x_set_rx_mode(bp->dev);
7606 bp->state = BNX2X_STATE_DIAG;
7607 break;
7608
7609 default:
7610 break;
7611 }
7612
7613 if (!bp->port.pmf)
7614 bnx2x__link_status_update(bp);
7615
7616 /* start the timer */
7617 mod_timer(&bp->timer, jiffies + bp->current_interval);
7618
34f80b04 7619
7620 return 0;
7621
7622#ifdef BCM_CNIC
7623load_error4:
7624 /* Disable Timer scan */
7625 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7626#endif
7627load_error3:
7628 bnx2x_int_disable_sync(bp, 1);
7629 if (!BP_NOMCP(bp)) {
7630 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7631 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7632 }
7633 bp->port.pmf = 0;
7634 /* Free SKBs, SGEs, TPA pool and driver internals */
7635 bnx2x_free_skbs(bp);
555f6c78 7636 for_each_rx_queue(bp, i)
3196a88a 7637 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7638load_error2:
7639 /* Release IRQs */
7640 bnx2x_free_irq(bp);
7641load_error1:
7642 bnx2x_napi_disable(bp);
555f6c78 7643 for_each_rx_queue(bp, i)
7cde1c8b 7644 netif_napi_del(&bnx2x_fp(bp, i, napi));
7645 bnx2x_free_mem(bp);
7646
34f80b04 7647 return rc;
7648}
7649
7650static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7651{
555f6c78 7652 struct bnx2x_fastpath *fp = &bp->fp[index];
7653 int rc;
7654
c14423fe 7655 /* halt the connection */
7656 fp->state = BNX2X_FP_STATE_HALTING;
7657 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7658
34f80b04 7659 /* Wait for completion */
a2fbb9ea 7660 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7661 &(fp->state), 1);
c14423fe 7662 if (rc) /* timeout */
7663 return rc;
7664
7665 /* delete cfc entry */
7666 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7667
7668 /* Wait for completion */
7669 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7670 &(fp->state), 1);
34f80b04 7671 return rc;
7672}
7673
da5a662a 7674static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7675{
4781bfad 7676 __le16 dsb_sp_prod_idx;
c14423fe 7677 /* if the other port is handling traffic,
a2fbb9ea 7678 this can take a lot of time */
7679 int cnt = 500;
7680 int rc;
7681
7682 might_sleep();
7683
7684 /* Send HALT ramrod */
7685 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7686 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7687
7688 /* Wait for completion */
7689 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7690 &(bp->fp[0].state), 1);
7691 if (rc) /* timeout */
da5a662a 7692 return rc;
a2fbb9ea 7693
49d66772 7694 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7695
228241eb 7696 /* Send PORT_DELETE ramrod */
7697 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7698
49d66772 7699 /* Wait for the completion to arrive on the default status block;
7700 we are going to reset the chip anyway,
7701 so there is not much to do if this times out
7702 */
34f80b04 7703 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7704 if (!cnt) {
7705 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7706 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7707 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7708#ifdef BNX2X_STOP_ON_ERROR
7709 bnx2x_panic();
7710#endif
36e552ab 7711 rc = -EBUSY;
7712 break;
7713 }
7714 cnt--;
da5a662a 7715 msleep(1);
5650d9d4 7716 rmb(); /* Refresh the dsb_sp_prod */
7717 }
7718 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7719 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7720
7721 return rc;
7722}
7723
7724static void bnx2x_reset_func(struct bnx2x *bp)
7725{
7726 int port = BP_PORT(bp);
7727 int func = BP_FUNC(bp);
7728 int base, i;
7729
7730 /* Configure IGU */
7731 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7732 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7733
7734#ifdef BCM_CNIC
7735 /* Disable Timer scan */
7736 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7737 /*
7738 * Wait for at least 10ms and up to 2 second for the timers scan to
7739 * complete
7740 */
7741 for (i = 0; i < 200; i++) {
7742 msleep(10);
7743 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7744 break;
7745 }
7746#endif
7747 /* Clear ILT */
7748 base = FUNC_ILT_BASE(func);
7749 for (i = base; i < base + ILT_PER_FUNC; i++)
7750 bnx2x_ilt_wr(bp, i, 0);
7751}
7752
7753static void bnx2x_reset_port(struct bnx2x *bp)
7754{
7755 int port = BP_PORT(bp);
7756 u32 val;
7757
7758 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7759
7760 /* Do not rcv packets to BRB */
7761 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7762 /* Do not direct rcv packets that are not for MCP to the BRB */
7763 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7764 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7765
7766 /* Configure AEU */
7767 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7768
7769 msleep(100);
7770 /* Check for BRB port occupancy */
7771 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7772 if (val)
7773 DP(NETIF_MSG_IFDOWN,
33471629 7774 "BRB1 is not empty %d blocks are occupied\n", val);
7775
7776 /* TODO: Close Doorbell port? */
7777}
7778
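/* Reset scope mirrors the unload response from the MCP: COMMON resets
 * port, function and common blocks; PORT resets port and function;
 * FUNCTION resets only the per-function resources.
 */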
7779static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7780{
7781 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7782 BP_FUNC(bp), reset_code);
7783
7784 switch (reset_code) {
7785 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7786 bnx2x_reset_port(bp);
7787 bnx2x_reset_func(bp);
7788 bnx2x_reset_common(bp);
7789 break;
7790
7791 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7792 bnx2x_reset_port(bp);
7793 bnx2x_reset_func(bp);
7794 break;
7795
7796 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7797 bnx2x_reset_func(bp);
7798 break;
49d66772 7799
7800 default:
7801 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7802 break;
7803 }
7804}
7805
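/* Tear-down mirror of bnx2x_nic_load(): stop Rx/Tx, release IRQs,
 * invalidate the MACs (optionally arming WoL), close the connections
 * via ramrods, reset the chip per the MCP response and free all
 * driver memory.
 */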
33471629 7806/* must be called with rtnl_lock */
34f80b04 7807static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7808{
da5a662a 7809 int port = BP_PORT(bp);
a2fbb9ea 7810 u32 reset_code = 0;
da5a662a 7811 int i, cnt, rc;
7812
7813 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7814
ab6ad5a4 7815 /* Set "drop all" */
7816 bp->rx_mode = BNX2X_RX_MODE_NONE;
7817 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7818
ab6ad5a4 7819 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7820 bnx2x_netif_stop(bp, 1);
e94d8af3 7821
7822 del_timer_sync(&bp->timer);
7823 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7824 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7825 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7826
7827 /* Release IRQs */
7828 bnx2x_free_irq(bp);
7829
7830 /* Wait until tx fastpath tasks complete */
7831 for_each_tx_queue(bp, i) {
7832 struct bnx2x_fastpath *fp = &bp->fp[i];
7833
34f80b04 7834 cnt = 1000;
e8b5fc51 7835 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7836
7961f791 7837 bnx2x_tx_int(fp);
7838 if (!cnt) {
7839 BNX2X_ERR("timeout waiting for queue[%d]\n",
7840 i);
7841#ifdef BNX2X_STOP_ON_ERROR
7842 bnx2x_panic();
7843 return -EBUSY;
7844#else
7845 break;
7846#endif
7847 }
7848 cnt--;
da5a662a 7849 msleep(1);
34f80b04 7850 }
228241eb 7851 }
7852 /* Give HW time to discard old tx messages */
7853 msleep(1);
a2fbb9ea 7854
7855 if (CHIP_IS_E1(bp)) {
7856 struct mac_configuration_cmd *config =
7857 bnx2x_sp(bp, mcast_config);
7858
e665bfda 7859 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7860
8d9c5f34 7861 for (i = 0; i < config->hdr.length; i++)
7862 CAM_INVALIDATE(config->config_table[i]);
7863
8d9c5f34 7864 config->hdr.length = i;
7865 if (CHIP_REV_IS_SLOW(bp))
7866 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7867 else
7868 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7869 config->hdr.client_id = bp->fp->cl_id;
7870 config->hdr.reserved1 = 0;
7871
7872 bp->set_mac_pending++;
7873 smp_wmb();
7874
7875 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7876 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7877 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7878
7879 } else { /* E1H */
7880 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7881
e665bfda 7882 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7883
7884 for (i = 0; i < MC_HASH_SIZE; i++)
7885 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7886
7887 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7888 }
7889
7890 if (unload_mode == UNLOAD_NORMAL)
7891 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7892
7d0446c2 7893 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7894 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7895
7d0446c2 7896 else if (bp->wol) {
7897 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7898 u8 *mac_addr = bp->dev->dev_addr;
7899 u32 val;
7900 /* The mac address is written to entries 1-4 to
7901 preserve entry 0 which is used by the PMF */
7902 u8 entry = (BP_E1HVN(bp) + 1)*8;
7903
7904 val = (mac_addr[0] << 8) | mac_addr[1];
7905 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7906
7907 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7908 (mac_addr[4] << 8) | mac_addr[5];
7909 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7910
7911 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7912
7913 } else
7914 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7915
7916 /* Close the multi and leading connections;
7917 completions for the ramrods are collected in a synchronous way */
7918 for_each_nondefault_queue(bp, i)
7919 if (bnx2x_stop_multi(bp, i))
228241eb 7920 goto unload_error;
a2fbb9ea 7921
7922 rc = bnx2x_stop_leading(bp);
7923 if (rc) {
34f80b04 7924 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7925#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7926 return -EBUSY;
7927#else
7928 goto unload_error;
34f80b04 7929#endif
7930 }
7931
7932unload_error:
34f80b04 7933 if (!BP_NOMCP(bp))
228241eb 7934 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7935 else {
f5372251 7936 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7937 load_count[0], load_count[1], load_count[2]);
7938 load_count[0]--;
da5a662a 7939 load_count[1 + port]--;
f5372251 7940 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7941 load_count[0], load_count[1], load_count[2]);
7942 if (load_count[0] == 0)
7943 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7944 else if (load_count[1 + port] == 0)
7945 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7946 else
7947 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7948 }
a2fbb9ea 7949
7950 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7951 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7952 bnx2x__link_reset(bp);
7953
7954 /* Reset the chip */
228241eb 7955 bnx2x_reset_chip(bp, reset_code);
7956
7957 /* Report UNLOAD_DONE to MCP */
34f80b04 7958 if (!BP_NOMCP(bp))
a2fbb9ea 7959 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7960
9a035440 7961 bp->port.pmf = 0;
a2fbb9ea 7962
7a9b2557 7963 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7964 bnx2x_free_skbs(bp);
555f6c78 7965 for_each_rx_queue(bp, i)
3196a88a 7966 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7967 for_each_rx_queue(bp, i)
7cde1c8b 7968 netif_napi_del(&bnx2x_fp(bp, i, napi));
7969 bnx2x_free_mem(bp);
7970
7971 bp->state = BNX2X_STATE_CLOSED;
228241eb 7972
7973 netif_carrier_off(bp->dev);
7974
7975 return 0;
7976}
7977
7978static void bnx2x_reset_task(struct work_struct *work)
7979{
7980 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7981
7982#ifdef BNX2X_STOP_ON_ERROR
7983 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7984 " so reset not done to allow debug dump,\n"
ad361c98 7985 " you will need to reboot when done\n");
7986 return;
7987#endif
7988
7989 rtnl_lock();
7990
7991 if (!netif_running(bp->dev))
7992 goto reset_task_exit;
7993
7994 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7995 bnx2x_nic_load(bp, LOAD_NORMAL);
7996
7997reset_task_exit:
7998 rtnl_unlock();
7999}
8000
8001/* end of nic load/unload */
8002
8003/* ethtool_ops */
8004
8005/*
8006 * Init service functions
8007 */
8008
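/* Map a function index to its PXP2 "pretend" register; writing a
 * function number there makes subsequent GRC accesses be issued as
 * that function (used below to disable interrupts on behalf of
 * function 0).
 */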
8009static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8010{
8011 switch (func) {
8012 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8013 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8014 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8015 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8016 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8017 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8018 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8019 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8020 default:
8021 BNX2X_ERR("Unsupported function index: %d\n", func);
8022 return (u32)(-1);
8023 }
8024}
8025
8026static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8027{
8028 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8029
8030 /* Flush all outstanding writes */
8031 mmiowb();
8032
8033 /* Pretend to be function 0 */
8034 REG_WR(bp, reg, 0);
8035 /* Flush the GRC transaction (in the chip) */
8036 new_val = REG_RD(bp, reg);
8037 if (new_val != 0) {
8038 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8039 new_val);
8040 BUG();
8041 }
8042
8043 /* From now we are in the "like-E1" mode */
8044 bnx2x_int_disable(bp);
8045
8046 /* Flush all outstanding writes */
8047 mmiowb();
8048
8049 /* Restore the original function settings */
8050 REG_WR(bp, reg, orig_func);
8051 new_val = REG_RD(bp, reg);
8052 if (new_val != orig_func) {
8053 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8054 orig_func, new_val);
8055 BUG();
8056 }
8057}
8058
8059static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8060{
8061 if (CHIP_IS_E1H(bp))
8062 bnx2x_undi_int_disable_e1h(bp, func);
8063 else
8064 bnx2x_int_disable(bp);
8065}
8066
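/* If a pre-boot UNDI driver left the device initialized (detected via
 * MISC_REG_UNPREPARED and the doorbell CID offset of 0x7), gracefully
 * unload it on both ports, quiesce traffic and reset the chip before
 * taking over.
 */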
8067static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8068{
8069 u32 val;
8070
8071 /* Check if there is any driver already loaded */
8072 val = REG_RD(bp, MISC_REG_UNPREPARED);
8073 if (val == 0x1) {
8074 /* Check if it is the UNDI driver
8075 * UNDI driver initializes CID offset for normal bell to 0x7
8076 */
4a37fb66 8077 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8078 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8079 if (val == 0x7) {
8080 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8081 /* save our func */
34f80b04 8082 int func = BP_FUNC(bp);
8083 u32 swap_en;
8084 u32 swap_val;
34f80b04 8085
8086 /* clear the UNDI indication */
8087 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8088
8089 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8090
8091 /* try unload UNDI on port 0 */
8092 bp->func = 0;
8093 bp->fw_seq =
8094 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8095 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8096 reset_code = bnx2x_fw_command(bp, reset_code);
8097
8098 /* if UNDI is loaded on the other port */
8099 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8100
8101 /* send "DONE" for previous unload */
8102 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8103
8104 /* unload UNDI on port 1 */
34f80b04 8105 bp->func = 1;
8106 bp->fw_seq =
8107 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8108 DRV_MSG_SEQ_NUMBER_MASK);
8109 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8110
8111 bnx2x_fw_command(bp, reset_code);
8112 }
8113
8114 /* now it's safe to release the lock */
8115 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8116
f1ef27ef 8117 bnx2x_undi_int_disable(bp, func);
8118
8119 /* close input traffic and wait for it */
8120 /* Do not rcv packets to BRB */
8121 REG_WR(bp,
8122 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8123 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8124 /* Do not direct rcv packets that are not for MCP to
8125 * the BRB */
8126 REG_WR(bp,
8127 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8128 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8129 /* clear AEU */
8130 REG_WR(bp,
8131 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8132 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8133 msleep(10);
8134
8135 /* save NIG port swap info */
8136 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8137 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8138 /* reset device */
8139 REG_WR(bp,
8140 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8141 0xd3ffffff);
8142 REG_WR(bp,
8143 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8144 0x1403);
8145 /* take the NIG out of reset and restore swap values */
8146 REG_WR(bp,
8147 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8148 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8149 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8150 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8151
8152 /* send unload done to the MCP */
8153 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8154
8155 /* restore our func and fw_seq */
8156 bp->func = func;
8157 bp->fw_seq =
8158 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8159 DRV_MSG_SEQ_NUMBER_MASK);
8160
8161 } else
8162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8163 }
8164}
8165
8166static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8167{
8168 u32 val, val2, val3, val4, id;
72ce58c3 8169 u16 pmc;
8170
8171 /* Get the chip revision id and number. */
8172 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8173 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8174 id = ((val & 0xffff) << 16);
8175 val = REG_RD(bp, MISC_REG_CHIP_REV);
8176 id |= ((val & 0xf) << 12);
8177 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8178 id |= ((val & 0xff) << 4);
5a40e08e 8179 val = REG_RD(bp, MISC_REG_BOND_ID);
8180 id |= (val & 0xf);
8181 bp->common.chip_id = id;
8182 bp->link_params.chip_id = bp->common.chip_id;
8183 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8184
8185 val = (REG_RD(bp, 0x2874) & 0x55);
8186 if ((bp->common.chip_id & 0x1) ||
8187 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8188 bp->flags |= ONE_PORT_FLAG;
8189 BNX2X_DEV_INFO("single port device\n");
8190 }
8191
8192 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8193 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8194 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8195 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8196 bp->common.flash_size, bp->common.flash_size);
8197
8198 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8199 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8200 bp->link_params.shmem_base = bp->common.shmem_base;
8201 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8202 bp->common.shmem_base, bp->common.shmem2_base);
8203
8204 if (!bp->common.shmem_base ||
8205 (bp->common.shmem_base < 0xA0000) ||
8206 (bp->common.shmem_base >= 0xC0000)) {
8207 BNX2X_DEV_INFO("MCP not active\n");
8208 bp->flags |= NO_MCP_FLAG;
8209 return;
8210 }
8211
8212 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8213 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8214 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8215 BNX2X_ERR("BAD MCP validity signature\n");
8216
8217 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8218 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8219
8220 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8221 SHARED_HW_CFG_LED_MODE_MASK) >>
8222 SHARED_HW_CFG_LED_MODE_SHIFT);
8223
8224 bp->link_params.feature_config_flags = 0;
8225 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8226 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8227 bp->link_params.feature_config_flags |=
8228 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8229 else
8230 bp->link_params.feature_config_flags &=
8231 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8232
8233 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8234 bp->common.bc_ver = val;
8235 BNX2X_DEV_INFO("bc_ver %X\n", val);
8236 if (val < BNX2X_BC_VER) {
8237 /* for now only warn
8238 * later we might need to enforce this */
8239 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8240 " please upgrade BC\n", BNX2X_BC_VER, val);
8241 }
8242 bp->link_params.feature_config_flags |=
8243 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8244 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8245
8246 if (BP_E1HVN(bp) == 0) {
8247 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8248 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8249 } else {
8250 /* no WOL capability for E1HVN != 0 */
8251 bp->flags |= NO_WOL_FLAG;
8252 }
8253 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8254 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8255
8256 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8257 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8258 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8259 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8260
8261 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8262 val, val2, val3, val4);
8263}
8264
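/* Build the port's "supported" link-mode mask from the switch
 * configuration and external PHY type read from NVRAM, then trim it
 * according to the port's speed_cap_mask.
 */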
8265static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8266 u32 switch_cfg)
a2fbb9ea 8267{
34f80b04 8268 int port = BP_PORT(bp);
8269 u32 ext_phy_type;
8270
8271 switch (switch_cfg) {
8272 case SWITCH_CFG_1G:
8273 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8274
8275 ext_phy_type =
8276 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8277 switch (ext_phy_type) {
8278 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8279 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8280 ext_phy_type);
8281
8282 bp->port.supported |= (SUPPORTED_10baseT_Half |
8283 SUPPORTED_10baseT_Full |
8284 SUPPORTED_100baseT_Half |
8285 SUPPORTED_100baseT_Full |
8286 SUPPORTED_1000baseT_Full |
8287 SUPPORTED_2500baseX_Full |
8288 SUPPORTED_TP |
8289 SUPPORTED_FIBRE |
8290 SUPPORTED_Autoneg |
8291 SUPPORTED_Pause |
8292 SUPPORTED_Asym_Pause);
8293 break;
8294
8295 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8296 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8297 ext_phy_type);
8298
8299 bp->port.supported |= (SUPPORTED_10baseT_Half |
8300 SUPPORTED_10baseT_Full |
8301 SUPPORTED_100baseT_Half |
8302 SUPPORTED_100baseT_Full |
8303 SUPPORTED_1000baseT_Full |
8304 SUPPORTED_TP |
8305 SUPPORTED_FIBRE |
8306 SUPPORTED_Autoneg |
8307 SUPPORTED_Pause |
8308 SUPPORTED_Asym_Pause);
8309 break;
8310
8311 default:
8312 BNX2X_ERR("NVRAM config error. "
8313 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8314 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8315 return;
8316 }
8317
8318 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8319 port*0x10);
8320 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8321 break;
8322
8323 case SWITCH_CFG_10G:
8324 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8325
8326 ext_phy_type =
8327 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8328 switch (ext_phy_type) {
8329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8330 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8331 ext_phy_type);
8332
8333 bp->port.supported |= (SUPPORTED_10baseT_Half |
8334 SUPPORTED_10baseT_Full |
8335 SUPPORTED_100baseT_Half |
8336 SUPPORTED_100baseT_Full |
8337 SUPPORTED_1000baseT_Full |
8338 SUPPORTED_2500baseX_Full |
8339 SUPPORTED_10000baseT_Full |
8340 SUPPORTED_TP |
8341 SUPPORTED_FIBRE |
8342 SUPPORTED_Autoneg |
8343 SUPPORTED_Pause |
8344 SUPPORTED_Asym_Pause);
8345 break;
8346
8347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8348 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8349 ext_phy_type);
f1410647 8350
34f80b04 8351 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8352 SUPPORTED_1000baseT_Full |
34f80b04 8353 SUPPORTED_FIBRE |
589abe3a 8354 SUPPORTED_Autoneg |
8355 SUPPORTED_Pause |
8356 SUPPORTED_Asym_Pause);
8357 break;
8358
8359 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8360 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8361 ext_phy_type);
8362
34f80b04 8363 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8364 SUPPORTED_2500baseX_Full |
34f80b04 8365 SUPPORTED_1000baseT_Full |
8366 SUPPORTED_FIBRE |
8367 SUPPORTED_Autoneg |
8368 SUPPORTED_Pause |
8369 SUPPORTED_Asym_Pause);
8370 break;
8371
8372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8373 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8374 ext_phy_type);
8375
8376 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8377 SUPPORTED_FIBRE |
8378 SUPPORTED_Pause |
8379 SUPPORTED_Asym_Pause);
8380 break;
8381
8382 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8383 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8384 ext_phy_type);
8385
8386 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8387 SUPPORTED_1000baseT_Full |
8388 SUPPORTED_FIBRE |
8389 SUPPORTED_Pause |
8390 SUPPORTED_Asym_Pause);
8391 break;
8392
8393 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8394 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8395 ext_phy_type);
8396
34f80b04 8397 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8398 SUPPORTED_1000baseT_Full |
34f80b04 8399 SUPPORTED_Autoneg |
589abe3a 8400 SUPPORTED_FIBRE |
8401 SUPPORTED_Pause |
8402 SUPPORTED_Asym_Pause);
8403 break;
8404
8405 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8406 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8407 ext_phy_type);
8408
8409 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8410 SUPPORTED_1000baseT_Full |
8411 SUPPORTED_Autoneg |
8412 SUPPORTED_FIBRE |
8413 SUPPORTED_Pause |
8414 SUPPORTED_Asym_Pause);
8415 break;
8416
8417 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8418 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8419 ext_phy_type);
8420
8421 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8422 SUPPORTED_TP |
8423 SUPPORTED_Autoneg |
8424 SUPPORTED_Pause |
8425 SUPPORTED_Asym_Pause);
8426 break;
8427
8428 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8429 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8430 ext_phy_type);
8431
8432 bp->port.supported |= (SUPPORTED_10baseT_Half |
8433 SUPPORTED_10baseT_Full |
8434 SUPPORTED_100baseT_Half |
8435 SUPPORTED_100baseT_Full |
8436 SUPPORTED_1000baseT_Full |
8437 SUPPORTED_10000baseT_Full |
8438 SUPPORTED_TP |
8439 SUPPORTED_Autoneg |
8440 SUPPORTED_Pause |
8441 SUPPORTED_Asym_Pause);
8442 break;
8443
8444 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8445 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8446 bp->link_params.ext_phy_config);
8447 break;
8448
8449 default:
8450 BNX2X_ERR("NVRAM config error. "
8451 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8452 bp->link_params.ext_phy_config);
8453 return;
8454 }
8455
8456 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8457 port*0x18);
8458 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8459
8460 break;
8461
8462 default:
8463 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8464 bp->port.link_config);
8465 return;
8466 }
34f80b04 8467 bp->link_params.phy_addr = bp->port.phy_addr;
8468
8469 /* mask what we support according to speed_cap_mask */
8470 if (!(bp->link_params.speed_cap_mask &
8471 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8472 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8473
8474 if (!(bp->link_params.speed_cap_mask &
8475 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8476 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8477
8478 if (!(bp->link_params.speed_cap_mask &
8479 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8480 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8481
8482 if (!(bp->link_params.speed_cap_mask &
8483 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8484 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8485
8486 if (!(bp->link_params.speed_cap_mask &
8487 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8488 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8489 SUPPORTED_1000baseT_Full);
a2fbb9ea 8490
8491 if (!(bp->link_params.speed_cap_mask &
8492 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8493 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8494
8495 if (!(bp->link_params.speed_cap_mask &
8496 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8497 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8498
34f80b04 8499 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8500}
8501
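/*
 * Decode the requested link settings from the NVRAM link_config word.
 * Illustrative reading of the switch below: PORT_FEATURE_LINK_SPEED_AUTO
 * with SUPPORTED_Autoneg set yields req_line_speed = SPEED_AUTO_NEG and
 * advertising = everything left in bp->port.supported, while a forced
 * speed such as PORT_FEATURE_LINK_SPEED_1G is accepted only if the
 * matching SUPPORTED_* bit survived the speed_cap_mask filtering above.
 */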
34f80b04 8502static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8503{
c18487ee 8504 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8505
34f80b04 8506 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8507 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8508 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8509 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8510 bp->port.advertising = bp->port.supported;
a2fbb9ea 8511 } else {
8512 u32 ext_phy_type =
8513 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8514
8515 if ((ext_phy_type ==
8516 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8517 (ext_phy_type ==
8518 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8519 /* force 10G, no AN */
c18487ee 8520 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8521 bp->port.advertising =
8522 (ADVERTISED_10000baseT_Full |
8523 ADVERTISED_FIBRE);
8524 break;
8525 }
8526 BNX2X_ERR("NVRAM config error. "
8527 "Invalid link_config 0x%x"
8528 " Autoneg not supported\n",
34f80b04 8529 bp->port.link_config);
8530 return;
8531 }
8532 break;
8533
8534 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8535 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8536 bp->link_params.req_line_speed = SPEED_10;
8537 bp->port.advertising = (ADVERTISED_10baseT_Full |
8538 ADVERTISED_TP);
8539 } else {
8540 BNX2X_ERR("NVRAM config error. "
8541 "Invalid link_config 0x%x"
8542 " speed_cap_mask 0x%x\n",
34f80b04 8543 bp->port.link_config,
c18487ee 8544 bp->link_params.speed_cap_mask);
8545 return;
8546 }
8547 break;
8548
8549 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8550 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8551 bp->link_params.req_line_speed = SPEED_10;
8552 bp->link_params.req_duplex = DUPLEX_HALF;
8553 bp->port.advertising = (ADVERTISED_10baseT_Half |
8554 ADVERTISED_TP);
8555 } else {
8556 BNX2X_ERR("NVRAM config error. "
8557 "Invalid link_config 0x%x"
8558 " speed_cap_mask 0x%x\n",
34f80b04 8559 bp->port.link_config,
c18487ee 8560 bp->link_params.speed_cap_mask);
8561 return;
8562 }
8563 break;
8564
8565 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8566 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8567 bp->link_params.req_line_speed = SPEED_100;
8568 bp->port.advertising = (ADVERTISED_100baseT_Full |
8569 ADVERTISED_TP);
8570 } else {
8571 BNX2X_ERR("NVRAM config error. "
8572 "Invalid link_config 0x%x"
8573 " speed_cap_mask 0x%x\n",
34f80b04 8574 bp->port.link_config,
c18487ee 8575 bp->link_params.speed_cap_mask);
8576 return;
8577 }
8578 break;
8579
8580 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8581 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8582 bp->link_params.req_line_speed = SPEED_100;
8583 bp->link_params.req_duplex = DUPLEX_HALF;
8584 bp->port.advertising = (ADVERTISED_100baseT_Half |
8585 ADVERTISED_TP);
8586 } else {
8587 BNX2X_ERR("NVRAM config error. "
8588 "Invalid link_config 0x%x"
8589 " speed_cap_mask 0x%x\n",
34f80b04 8590 bp->port.link_config,
c18487ee 8591 bp->link_params.speed_cap_mask);
8592 return;
8593 }
8594 break;
8595
8596 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8597 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8598 bp->link_params.req_line_speed = SPEED_1000;
8599 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8600 ADVERTISED_TP);
8601 } else {
8602 BNX2X_ERR("NVRAM config error. "
8603 "Invalid link_config 0x%x"
8604 " speed_cap_mask 0x%x\n",
34f80b04 8605 bp->port.link_config,
c18487ee 8606 bp->link_params.speed_cap_mask);
8607 return;
8608 }
8609 break;
8610
8611 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8612 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8613 bp->link_params.req_line_speed = SPEED_2500;
8614 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8615 ADVERTISED_TP);
8616 } else {
8617 BNX2X_ERR("NVRAM config error. "
8618 "Invalid link_config 0x%x"
8619 " speed_cap_mask 0x%x\n",
34f80b04 8620 bp->port.link_config,
c18487ee 8621 bp->link_params.speed_cap_mask);
8622 return;
8623 }
8624 break;
8625
8626 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8627 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8628 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8629 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8630 bp->link_params.req_line_speed = SPEED_10000;
8631 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8632 ADVERTISED_FIBRE);
8633 } else {
8634 BNX2X_ERR("NVRAM config error. "
8635 "Invalid link_config 0x%x"
8636 " speed_cap_mask 0x%x\n",
34f80b04 8637 bp->port.link_config,
c18487ee 8638 bp->link_params.speed_cap_mask);
8639 return;
8640 }
8641 break;
8642
8643 default:
8644 BNX2X_ERR("NVRAM config error. "
8645 "BAD link speed link_config 0x%x\n",
34f80b04 8646 bp->port.link_config);
c18487ee 8647 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8648 bp->port.advertising = bp->port.supported;
8649 break;
8650 }
a2fbb9ea 8651
8652 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8653 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8654 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8655 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8656 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8657
c18487ee 8658 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8659 " advertising 0x%x\n",
8660 bp->link_params.req_line_speed,
8661 bp->link_params.req_duplex,
34f80b04 8662 bp->link_params.req_flow_ctrl, bp->port.advertising);
8663}
8664
8665static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8666{
8667 mac_hi = cpu_to_be16(mac_hi);
8668 mac_lo = cpu_to_be32(mac_lo);
8669 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8670 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8671}
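/*
 * Worked example for the packing above (illustrative values only): with
 * mac_hi = 0x0010 and mac_lo = 0x18015afc, cpu_to_be16()/cpu_to_be32()
 * store the bytes in network order, so mac_buf ends up as
 * 00:10:18:01:5a:fc - the layout dev_addr expects.
 */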
8672
34f80b04 8673static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8674{
8675 int port = BP_PORT(bp);
8676 u32 val, val2;
589abe3a 8677 u32 config;
c2c8b03e 8678 u16 i;
01cd4528 8679 u32 ext_phy_type;
a2fbb9ea 8680
c18487ee 8681 bp->link_params.bp = bp;
34f80b04 8682 bp->link_params.port = port;
c18487ee 8683
c18487ee 8684 bp->link_params.lane_config =
a2fbb9ea 8685 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8686 bp->link_params.ext_phy_config =
8687 SHMEM_RD(bp,
8688 dev_info.port_hw_config[port].external_phy_config);
8689 /* BCM8727_NOC => BCM8727 no over current */
8690 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8691 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8692 bp->link_params.ext_phy_config &=
8693 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8694 bp->link_params.ext_phy_config |=
8695 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8696 bp->link_params.feature_config_flags |=
8697 FEATURE_CONFIG_BCM8727_NOC;
8698 }
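/*
 * Note: after the remap above the rest of the driver sees a plain
 * BCM8727; only FEATURE_CONFIG_BCM8727_NOC remembers that this board
 * lacks the over-current pin, presumably so the link code can
 * special-case it.
 */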
8699
c18487ee 8700 bp->link_params.speed_cap_mask =
8701 SHMEM_RD(bp,
8702 dev_info.port_hw_config[port].speed_capability_mask);
8703
34f80b04 8704 bp->port.link_config =
8705 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8706
8707 /* Get the 4 lanes xgxs config rx and tx */
8708 for (i = 0; i < 2; i++) {
8709 val = SHMEM_RD(bp,
8710 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8711 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8712 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8713
8714 val = SHMEM_RD(bp,
8715 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8716 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8717 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8718 }
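/*
 * Example of the unpacking above (illustrative value): a shmem word of
 * 0xAAAABBBB read for i = 0 yields xgxs_config_rx[0] = 0xAAAA (high
 * half) and xgxs_config_rx[1] = 0xBBBB (low half); i = 1 fills entries
 * 2 and 3, giving one 16-bit per-lane setting for each XGXS lane.
 */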
8719
8720 /* If the device is capable of WoL, set the default state according
8721 * to the HW
8722 */
4d295db0 8723 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8724 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8725 (config & PORT_FEATURE_WOL_ENABLED));
8726
8727 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8728 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8729 bp->link_params.lane_config,
8730 bp->link_params.ext_phy_config,
34f80b04 8731 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8732
8733 bp->link_params.switch_cfg |= (bp->port.link_config &
8734 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8735 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8736
8737 bnx2x_link_settings_requested(bp);
8738
8739 /*
8740 * If connected directly, work with the internal PHY; otherwise, work
8741 * with the external PHY
8742 */
8743 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8744 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8745 bp->mdio.prtad = bp->link_params.phy_addr;
8746
8747 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8748 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8749 bp->mdio.prtad =
659bc5c4 8750 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8751
8752 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8753 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8754 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8755 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8756 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8757
8758#ifdef BCM_CNIC
8759 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8760 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8761 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8762#endif
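/*
 * With BCM_CNIC the iSCSI MAC is read here from its own shmem words
 * (iscsi_mac_upper/lower), so the CNIC/iSCSI path gets an address
 * distinct from the L2 dev_addr read just above.
 */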
8763}
8764
8765static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8766{
8767 int func = BP_FUNC(bp);
8768 u32 val, val2;
8769 int rc = 0;
a2fbb9ea 8770
34f80b04 8771 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8772
8773 bp->e1hov = 0;
8774 bp->e1hmf = 0;
8775 if (CHIP_IS_E1H(bp)) {
8776 bp->mf_config =
8777 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8778
2691d51d 8779 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8780 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8781 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8782 bp->e1hmf = 1;
8783 BNX2X_DEV_INFO("%s function mode\n",
8784 IS_E1HMF(bp) ? "multi" : "single");
8785
8786 if (IS_E1HMF(bp)) {
8787 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8788 e1hov_tag) &
8789 FUNC_MF_CFG_E1HOV_TAG_MASK);
8790 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8791 bp->e1hov = val;
8792 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8793 "(0x%04x)\n",
8794 func, bp->e1hov, bp->e1hov);
8795 } else {
8796 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8797 " aborting\n", func);
8798 rc = -EPERM;
8799 }
8800 } else {
8801 if (BP_E1HVN(bp)) {
8802 BNX2X_ERR("!!! VN %d in single function mode,"
8803 " aborting\n", BP_E1HVN(bp));
8804 rc = -EPERM;
8805 }
8806 }
8807 }
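/*
 * E1H multi-function notes (as implied by the checks above): a valid
 * e1hov_tag on FUNC_0 switches the chip into multi-function mode, in
 * which each function needs its own outer-VLAN tag (bp->e1hov); a VN
 * other than 0 without such a tag is rejected as a bad NVRAM config.
 */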
a2fbb9ea 8808
8809 if (!BP_NOMCP(bp)) {
8810 bnx2x_get_port_hwinfo(bp);
8811
8812 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8813 DRV_MSG_SEQ_NUMBER_MASK);
8814 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8815 }
8816
8817 if (IS_E1HMF(bp)) {
8818 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8819 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8820 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8821 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8822 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8823 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8824 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8825 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8826 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8827 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8828 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8829 ETH_ALEN);
8830 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8831 ETH_ALEN);
a2fbb9ea 8832 }
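/*
 * Byte-order example for the extraction above (illustrative values):
 * mac_upper = 0x0010 and mac_lower = 0x18015afc give the station
 * address 00:10:18:01:5a:fc - the same layout bnx2x_set_mac_buf()
 * produces from the port shmem words.
 */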
8833
8834 return rc;
8835 }
8836
8837 if (BP_NOMCP(bp)) {
8838 /* only supposed to happen on emulation/FPGA */
33471629 8839 BNX2X_ERR("warning random MAC workaround active\n");
8840 random_ether_addr(bp->dev->dev_addr);
8841 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8842 }
a2fbb9ea 8843
8844 return rc;
8845}
8846
8847static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8848{
8849 int func = BP_FUNC(bp);
87942b46 8850 int timer_interval;
8851 int rc;
8852
8853 /* Disable interrupt handling until HW is initialized */
8854 atomic_set(&bp->intr_sem, 1);
e1510706 8855 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8856
34f80b04 8857 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8858
1cf167f2 8859 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8860 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8861
8862 rc = bnx2x_get_hwinfo(bp);
8863
8864 /* need to reset chip if undi was active */
8865 if (!BP_NOMCP(bp))
8866 bnx2x_undi_unload(bp);
8867
8868 if (CHIP_REV_IS_FPGA(bp))
8869 printk(KERN_ERR PFX "FPGA detected\n");
8870
8871 if (BP_NOMCP(bp) && (func == 0))
8872 printk(KERN_ERR PFX
8873 "MCP disabled, must load devices in order!\n");
8874
555f6c78 8875 /* Set multi queue mode */
8876 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8877 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8878 printk(KERN_ERR PFX
8badd27a 8879 "Multi disabled since int_mode requested is not MSI-X\n");
8880 multi_mode = ETH_RSS_MODE_DISABLED;
8881 }
8882 bp->multi_mode = multi_mode;
8883
8884
8885 /* Set TPA flags */
8886 if (disable_tpa) {
8887 bp->flags &= ~TPA_ENABLE_FLAG;
8888 bp->dev->features &= ~NETIF_F_LRO;
8889 } else {
8890 bp->flags |= TPA_ENABLE_FLAG;
8891 bp->dev->features |= NETIF_F_LRO;
8892 }
8893
8894 if (CHIP_IS_E1(bp))
8895 bp->dropless_fc = 0;
8896 else
8897 bp->dropless_fc = dropless_fc;
8898
8d5726c4 8899 bp->mrrs = mrrs;
7a9b2557 8900
8901 bp->tx_ring_size = MAX_TX_AVAIL;
8902 bp->rx_ring_size = MAX_RX_AVAIL;
8903
8904 bp->rx_csum = 1;
8905
8906 bp->tx_ticks = 50;
8907 bp->rx_ticks = 25;
8908
8909 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8910 bp->current_interval = (poll ? poll : timer_interval);
8911
8912 init_timer(&bp->timer);
8913 bp->timer.expires = jiffies + bp->current_interval;
8914 bp->timer.data = (unsigned long) bp;
8915 bp->timer.function = bnx2x_timer;
8916
8917 return rc;
8918}
8919
8920/*
8921 * ethtool service functions
8922 */
8923
8924/* All ethtool functions called with rtnl_lock */
8925
8926static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8927{
8928 struct bnx2x *bp = netdev_priv(dev);
8929
8930 cmd->supported = bp->port.supported;
8931 cmd->advertising = bp->port.advertising;
8932
8933 if (netif_carrier_ok(dev)) {
8934 cmd->speed = bp->link_vars.line_speed;
8935 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8936 } else {
8937 cmd->speed = bp->link_params.req_line_speed;
8938 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8939 }
8940 if (IS_E1HMF(bp)) {
8941 u16 vn_max_rate;
8942
8943 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8944 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8945 if (vn_max_rate < cmd->speed)
8946 cmd->speed = vn_max_rate;
8947 }
a2fbb9ea 8948
8949 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8950 u32 ext_phy_type =
8951 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8952
8953 switch (ext_phy_type) {
8954 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8955 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8956 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8961 cmd->port = PORT_FIBRE;
8962 break;
8963
8964 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8966 cmd->port = PORT_TP;
8967 break;
8968
8969 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8970 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8971 bp->link_params.ext_phy_config);
8972 break;
8973
8974 default:
8975 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8976 bp->link_params.ext_phy_config);
8977 break;
8978 }
8979 } else
a2fbb9ea 8980 cmd->port = PORT_TP;
a2fbb9ea 8981
01cd4528 8982 cmd->phy_address = bp->mdio.prtad;
8983 cmd->transceiver = XCVR_INTERNAL;
8984
c18487ee 8985 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8986 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8987 else
a2fbb9ea 8988 cmd->autoneg = AUTONEG_DISABLE;
8989
8990 cmd->maxtxpkt = 0;
8991 cmd->maxrxpkt = 0;
8992
8993 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8994 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8995 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8996 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8997 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8998 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8999 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9000
9001 return 0;
9002}
9003
9004static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9005{
9006 struct bnx2x *bp = netdev_priv(dev);
9007 u32 advertising;
9008
9009 if (IS_E1HMF(bp))
9010 return 0;
9011
9012 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9013 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9014 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9015 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9016 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9017 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9018 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9019
a2fbb9ea 9020 if (cmd->autoneg == AUTONEG_ENABLE) {
9021 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9022 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9023 return -EINVAL;
f1410647 9024 }
9025
9026 /* advertise the requested speed and duplex if supported */
34f80b04 9027 cmd->advertising &= bp->port.supported;
a2fbb9ea 9028
9029 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9030 bp->link_params.req_duplex = DUPLEX_FULL;
9031 bp->port.advertising |= (ADVERTISED_Autoneg |
9032 cmd->advertising);
9033
9034 } else { /* forced speed */
9035 /* advertise the requested speed and duplex if supported */
9036 switch (cmd->speed) {
9037 case SPEED_10:
9038 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9039 if (!(bp->port.supported &
9040 SUPPORTED_10baseT_Full)) {
9041 DP(NETIF_MSG_LINK,
9042 "10M full not supported\n");
a2fbb9ea 9043 return -EINVAL;
f1410647 9044 }
9045
9046 advertising = (ADVERTISED_10baseT_Full |
9047 ADVERTISED_TP);
9048 } else {
34f80b04 9049 if (!(bp->port.supported &
9050 SUPPORTED_10baseT_Half)) {
9051 DP(NETIF_MSG_LINK,
9052 "10M half not supported\n");
a2fbb9ea 9053 return -EINVAL;
f1410647 9054 }
9055
9056 advertising = (ADVERTISED_10baseT_Half |
9057 ADVERTISED_TP);
9058 }
9059 break;
9060
9061 case SPEED_100:
9062 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9063 if (!(bp->port.supported &
9064 SUPPORTED_100baseT_Full)) {
9065 DP(NETIF_MSG_LINK,
9066 "100M full not supported\n");
a2fbb9ea 9067 return -EINVAL;
f1410647 9068 }
9069
9070 advertising = (ADVERTISED_100baseT_Full |
9071 ADVERTISED_TP);
9072 } else {
34f80b04 9073 if (!(bp->port.supported &
9074 SUPPORTED_100baseT_Half)) {
9075 DP(NETIF_MSG_LINK,
9076 "100M half not supported\n");
a2fbb9ea 9077 return -EINVAL;
f1410647 9078 }
9079
9080 advertising = (ADVERTISED_100baseT_Half |
9081 ADVERTISED_TP);
9082 }
9083 break;
9084
9085 case SPEED_1000:
9086 if (cmd->duplex != DUPLEX_FULL) {
9087 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9088 return -EINVAL;
f1410647 9089 }
a2fbb9ea 9090
34f80b04 9091 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9092 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9093 return -EINVAL;
f1410647 9094 }
9095
9096 advertising = (ADVERTISED_1000baseT_Full |
9097 ADVERTISED_TP);
9098 break;
9099
9100 case SPEED_2500:
9101 if (cmd->duplex != DUPLEX_FULL) {
9102 DP(NETIF_MSG_LINK,
9103 "2.5G half not supported\n");
a2fbb9ea 9104 return -EINVAL;
f1410647 9105 }
a2fbb9ea 9106
34f80b04 9107 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9108 DP(NETIF_MSG_LINK,
9109 "2.5G full not supported\n");
a2fbb9ea 9110 return -EINVAL;
f1410647 9111 }
a2fbb9ea 9112
f1410647 9113 advertising = (ADVERTISED_2500baseX_Full |
9114 ADVERTISED_TP);
9115 break;
9116
9117 case SPEED_10000:
9118 if (cmd->duplex != DUPLEX_FULL) {
9119 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9120 return -EINVAL;
f1410647 9121 }
a2fbb9ea 9122
34f80b04 9123 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9124 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9125 return -EINVAL;
f1410647 9126 }
9127
9128 advertising = (ADVERTISED_10000baseT_Full |
9129 ADVERTISED_FIBRE);
9130 break;
9131
9132 default:
f1410647 9133 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9134 return -EINVAL;
9135 }
9136
9137 bp->link_params.req_line_speed = cmd->speed;
9138 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9139 bp->port.advertising = advertising;
9140 }
9141
c18487ee 9142 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9143 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9144 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9145 bp->port.advertising);
a2fbb9ea 9146
34f80b04 9147 if (netif_running(dev)) {
bb2a0f7a 9148 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9149 bnx2x_link_set(bp);
9150 }
9151
9152 return 0;
9153}
9154
9155#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9156#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9157
9158static int bnx2x_get_regs_len(struct net_device *dev)
9159{
0a64ea57 9160 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9161 int regdump_len = 0;
9162 int i;
9163
9164 if (CHIP_IS_E1(bp)) {
9165 for (i = 0; i < REGS_COUNT; i++)
9166 if (IS_E1_ONLINE(reg_addrs[i].info))
9167 regdump_len += reg_addrs[i].size;
9168
9169 for (i = 0; i < WREGS_COUNT_E1; i++)
9170 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9171 regdump_len += wreg_addrs_e1[i].size *
9172 (1 + wreg_addrs_e1[i].read_regs_count);
9173
9174 } else { /* E1H */
9175 for (i = 0; i < REGS_COUNT; i++)
9176 if (IS_E1H_ONLINE(reg_addrs[i].info))
9177 regdump_len += reg_addrs[i].size;
9178
9179 for (i = 0; i < WREGS_COUNT_E1H; i++)
9180 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9181 regdump_len += wreg_addrs_e1h[i].size *
9182 (1 + wreg_addrs_e1h[i].read_regs_count);
9183 }
9184 regdump_len *= 4;
9185 regdump_len += sizeof(struct dump_hdr);
9186
9187 return regdump_len;
9188}
9189
9190static void bnx2x_get_regs(struct net_device *dev,
9191 struct ethtool_regs *regs, void *_p)
9192{
9193 u32 *p = _p, i, j;
9194 struct bnx2x *bp = netdev_priv(dev);
9195 struct dump_hdr dump_hdr = {0};
9196
9197 regs->version = 0;
9198 memset(p, 0, regs->len);
9199
9200 if (!netif_running(bp->dev))
9201 return;
9202
9203 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9204 dump_hdr.dump_sign = dump_sign_all;
9205 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9206 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9207 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9208 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9209 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9210
9211 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9212 p += dump_hdr.hdr_size + 1;
9213
9214 if (CHIP_IS_E1(bp)) {
9215 for (i = 0; i < REGS_COUNT; i++)
9216 if (IS_E1_ONLINE(reg_addrs[i].info))
9217 for (j = 0; j < reg_addrs[i].size; j++)
9218 *p++ = REG_RD(bp,
9219 reg_addrs[i].addr + j*4);
9220
9221 } else { /* E1H */
9222 for (i = 0; i < REGS_COUNT; i++)
9223 if (IS_E1H_ONLINE(reg_addrs[i].info))
9224 for (j = 0; j < reg_addrs[i].size; j++)
9225 *p++ = REG_RD(bp,
9226 reg_addrs[i].addr + j*4);
9227 }
9228}
9229
9230#define PHY_FW_VER_LEN 10
9231
9232static void bnx2x_get_drvinfo(struct net_device *dev,
9233 struct ethtool_drvinfo *info)
9234{
9235 struct bnx2x *bp = netdev_priv(dev);
9236 u8 phy_fw_ver[PHY_FW_VER_LEN];
9237
9238 strcpy(info->driver, DRV_MODULE_NAME);
9239 strcpy(info->version, DRV_MODULE_VERSION);
9240
9241 phy_fw_ver[0] = '\0';
9242 if (bp->port.pmf) {
9243 bnx2x_acquire_phy_lock(bp);
9244 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9245 (bp->state != BNX2X_STATE_CLOSED),
9246 phy_fw_ver, PHY_FW_VER_LEN);
9247 bnx2x_release_phy_lock(bp);
9248 }
9249
9250 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9251 (bp->common.bc_ver & 0xff0000) >> 16,
9252 (bp->common.bc_ver & 0xff00) >> 8,
9253 (bp->common.bc_ver & 0xff),
9254 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9255 strcpy(info->bus_info, pci_name(bp->pdev));
9256 info->n_stats = BNX2X_NUM_STATS;
9257 info->testinfo_len = BNX2X_NUM_TESTS;
9258 info->eedump_len = bp->common.flash_size;
9259 info->regdump_len = bnx2x_get_regs_len(dev);
9260}
9261
9262static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9263{
9264 struct bnx2x *bp = netdev_priv(dev);
9265
9266 if (bp->flags & NO_WOL_FLAG) {
9267 wol->supported = 0;
9268 wol->wolopts = 0;
9269 } else {
9270 wol->supported = WAKE_MAGIC;
9271 if (bp->wol)
9272 wol->wolopts = WAKE_MAGIC;
9273 else
9274 wol->wolopts = 0;
9275 }
9276 memset(&wol->sopass, 0, sizeof(wol->sopass));
9277}
9278
9279static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9280{
9281 struct bnx2x *bp = netdev_priv(dev);
9282
9283 if (wol->wolopts & ~WAKE_MAGIC)
9284 return -EINVAL;
9285
9286 if (wol->wolopts & WAKE_MAGIC) {
9287 if (bp->flags & NO_WOL_FLAG)
9288 return -EINVAL;
9289
9290 bp->wol = 1;
34f80b04 9291 } else
a2fbb9ea 9292 bp->wol = 0;
34f80b04 9293
9294 return 0;
9295}
9296
9297static u32 bnx2x_get_msglevel(struct net_device *dev)
9298{
9299 struct bnx2x *bp = netdev_priv(dev);
9300
9301 return bp->msglevel;
9302}
9303
9304static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9305{
9306 struct bnx2x *bp = netdev_priv(dev);
9307
9308 if (capable(CAP_NET_ADMIN))
9309 bp->msglevel = level;
9310}
9311
9312static int bnx2x_nway_reset(struct net_device *dev)
9313{
9314 struct bnx2x *bp = netdev_priv(dev);
9315
9316 if (!bp->port.pmf)
9317 return 0;
a2fbb9ea 9318
34f80b04 9319 if (netif_running(dev)) {
bb2a0f7a 9320 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9321 bnx2x_link_set(bp);
9322 }
9323
9324 return 0;
9325}
9326
ab6ad5a4 9327static u32 bnx2x_get_link(struct net_device *dev)
9328{
9329 struct bnx2x *bp = netdev_priv(dev);
9330
9331 return bp->link_vars.link_up;
9332}
9333
9334static int bnx2x_get_eeprom_len(struct net_device *dev)
9335{
9336 struct bnx2x *bp = netdev_priv(dev);
9337
34f80b04 9338 return bp->common.flash_size;
9339}
9340
9341static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9342{
34f80b04 9343 int port = BP_PORT(bp);
9344 int count, i;
9345 u32 val = 0;
9346
9347 /* adjust timeout for emulation/FPGA */
9348 count = NVRAM_TIMEOUT_COUNT;
9349 if (CHIP_REV_IS_SLOW(bp))
9350 count *= 100;
9351
9352 /* request access to nvram interface */
9353 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9354 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9355
9356 for (i = 0; i < count*10; i++) {
9357 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9358 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9359 break;
9360
9361 udelay(5);
9362 }
9363
9364 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9365 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9366 return -EBUSY;
9367 }
9368
9369 return 0;
9370}
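/*
 * The SW arbitration handshake above: set the per-port REQ bit, then
 * poll MCPR_NVM_SW_ARB until the matching ARB bit is granted (up to
 * NVRAM_TIMEOUT_COUNT * 10 polls of 5 us each, scaled x100 on
 * emulation/FPGA). bnx2x_release_nvram_lock() below mirrors this with
 * the CLR bit and waits for the grant to drop.
 */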
9371
9372static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9373{
34f80b04 9374 int port = BP_PORT(bp);
9375 int count, i;
9376 u32 val = 0;
9377
9378 /* adjust timeout for emulation/FPGA */
9379 count = NVRAM_TIMEOUT_COUNT;
9380 if (CHIP_REV_IS_SLOW(bp))
9381 count *= 100;
9382
9383 /* relinquish nvram interface */
9384 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9385 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9386
9387 for (i = 0; i < count*10; i++) {
9388 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9389 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9390 break;
9391
9392 udelay(5);
9393 }
9394
9395 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9396 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9397 return -EBUSY;
9398 }
9399
9400 return 0;
9401}
9402
9403static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9404{
9405 u32 val;
9406
9407 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9408
9409 /* enable both bits, even on read */
9410 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9411 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9412 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9413}
9414
9415static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9416{
9417 u32 val;
9418
9419 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9420
9421 /* disable both bits, even after read */
9422 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9423 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9424 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9425}
9426
4781bfad 9427static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9428 u32 cmd_flags)
9429{
f1410647 9430 int count, i, rc;
9431 u32 val;
9432
9433 /* build the command word */
9434 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9435
9436 /* need to clear DONE bit separately */
9437 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9438
9439 /* address of the NVRAM to read from */
9440 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9441 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9442
9443 /* issue a read command */
9444 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9445
9446 /* adjust timeout for emulation/FPGA */
9447 count = NVRAM_TIMEOUT_COUNT;
9448 if (CHIP_REV_IS_SLOW(bp))
9449 count *= 100;
9450
9451 /* wait for completion */
9452 *ret_val = 0;
9453 rc = -EBUSY;
9454 for (i = 0; i < count; i++) {
9455 udelay(5);
9456 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9457
9458 if (val & MCPR_NVM_COMMAND_DONE) {
9459 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9460 /* we read nvram data in cpu order,
9461 * but ethtool sees it as an array of bytes;
9462 * converting to big-endian does the work */
4781bfad 9463 *ret_val = cpu_to_be32(val);
9464 rc = 0;
9465 break;
9466 }
9467 }
9468
9469 return rc;
9470}
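/*
 * Endianness example (illustrative, assuming the MCP returns the first
 * flash byte in the most significant position): if the NVRAM bytes at
 * the offset are 12 34 56 78, MCP_REG_MCPR_NVM_READ yields 0x12345678
 * in CPU order, and cpu_to_be32() stores it back as the byte stream
 * 12 34 56 78 - exactly what ethtool expects in its flat buffer.
 */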
9471
9472static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9473 int buf_size)
9474{
9475 int rc;
9476 u32 cmd_flags;
4781bfad 9477 __be32 val;
9478
9479 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9480 DP(BNX2X_MSG_NVM,
c14423fe 9481 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9482 offset, buf_size);
9483 return -EINVAL;
9484 }
9485
9486 if (offset + buf_size > bp->common.flash_size) {
9487 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9488 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9489 offset, buf_size, bp->common.flash_size);
9490 return -EINVAL;
9491 }
9492
9493 /* request access to nvram interface */
9494 rc = bnx2x_acquire_nvram_lock(bp);
9495 if (rc)
9496 return rc;
9497
9498 /* enable access to nvram interface */
9499 bnx2x_enable_nvram_access(bp);
9500
9501 /* read the first word(s) */
9502 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9503 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9504 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9505 memcpy(ret_buf, &val, 4);
9506
9507 /* advance to the next dword */
9508 offset += sizeof(u32);
9509 ret_buf += sizeof(u32);
9510 buf_size -= sizeof(u32);
9511 cmd_flags = 0;
9512 }
9513
9514 if (rc == 0) {
9515 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9516 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9517 memcpy(ret_buf, &val, 4);
9518 }
9519
9520 /* disable access to nvram interface */
9521 bnx2x_disable_nvram_access(bp);
9522 bnx2x_release_nvram_lock(bp);
9523
9524 return rc;
9525}
9526
9527static int bnx2x_get_eeprom(struct net_device *dev,
9528 struct ethtool_eeprom *eeprom, u8 *eebuf)
9529{
9530 struct bnx2x *bp = netdev_priv(dev);
9531 int rc;
9532
9533 if (!netif_running(dev))
9534 return -EAGAIN;
9535
34f80b04 9536 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9537 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9538 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9539 eeprom->len, eeprom->len);
9540
9541 /* parameters already validated in ethtool_get_eeprom */
9542
9543 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9544
9545 return rc;
9546}
9547
9548static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9549 u32 cmd_flags)
9550{
f1410647 9551 int count, i, rc;
a2fbb9ea
ET
9552
9553 /* build the command word */
9554 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9555
9556 /* need to clear DONE bit separately */
9557 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9558
9559 /* write the data */
9560 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9561
9562 /* address of the NVRAM to write to */
9563 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9564 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9565
9566 /* issue the write command */
9567 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9568
9569 /* adjust timeout for emulation/FPGA */
9570 count = NVRAM_TIMEOUT_COUNT;
9571 if (CHIP_REV_IS_SLOW(bp))
9572 count *= 100;
9573
9574 /* wait for completion */
9575 rc = -EBUSY;
9576 for (i = 0; i < count; i++) {
9577 udelay(5);
9578 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9579 if (val & MCPR_NVM_COMMAND_DONE) {
9580 rc = 0;
9581 break;
9582 }
9583 }
9584
9585 return rc;
9586}
9587
f1410647 9588#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
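/*
 * BYTE_OFFSET example: for offset 0x1002, BYTE_OFFSET() is 8 * 2 = 16,
 * so bnx2x_nvram_write1() below masks bits 23..16 of the dword read
 * from the aligned address 0x1000 - on a little-endian host those bits
 * hold the flash byte at 0x1002 of the big-endian dword image.
 */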
9589
9590static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9591 int buf_size)
9592{
9593 int rc;
9594 u32 cmd_flags;
9595 u32 align_offset;
4781bfad 9596 __be32 val;
a2fbb9ea 9597
9598 if (offset + buf_size > bp->common.flash_size) {
9599 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9600 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9601 offset, buf_size, bp->common.flash_size);
9602 return -EINVAL;
9603 }
9604
9605 /* request access to nvram interface */
9606 rc = bnx2x_acquire_nvram_lock(bp);
9607 if (rc)
9608 return rc;
9609
9610 /* enable access to nvram interface */
9611 bnx2x_enable_nvram_access(bp);
9612
9613 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9614 align_offset = (offset & ~0x03);
9615 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9616
9617 if (rc == 0) {
9618 val &= ~(0xff << BYTE_OFFSET(offset));
9619 val |= (*data_buf << BYTE_OFFSET(offset));
9620
9621 /* nvram data is returned as an array of bytes
9622 * convert it back to cpu order */
9623 val = be32_to_cpu(val);
9624
9625 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9626 cmd_flags);
9627 }
9628
9629 /* disable access to nvram interface */
9630 bnx2x_disable_nvram_access(bp);
9631 bnx2x_release_nvram_lock(bp);
9632
9633 return rc;
9634}
9635
9636static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9637 int buf_size)
9638{
9639 int rc;
9640 u32 cmd_flags;
9641 u32 val;
9642 u32 written_so_far;
9643
34f80b04 9644 if (buf_size == 1) /* ethtool */
a2fbb9ea 9645 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9646
9647 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9648 DP(BNX2X_MSG_NVM,
c14423fe 9649 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9650 offset, buf_size);
9651 return -EINVAL;
9652 }
9653
9654 if (offset + buf_size > bp->common.flash_size) {
9655 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9656 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9657 offset, buf_size, bp->common.flash_size);
9658 return -EINVAL;
9659 }
9660
9661 /* request access to nvram interface */
9662 rc = bnx2x_acquire_nvram_lock(bp);
9663 if (rc)
9664 return rc;
9665
9666 /* enable access to nvram interface */
9667 bnx2x_enable_nvram_access(bp);
9668
9669 written_so_far = 0;
9670 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9671 while ((written_so_far < buf_size) && (rc == 0)) {
9672 if (written_so_far == (buf_size - sizeof(u32)))
9673 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9674 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9675 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9676 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9677 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9678
9679 memcpy(&val, data_buf, 4);
9680
9681 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9682
9683 /* advance to the next dword */
9684 offset += sizeof(u32);
9685 data_buf += sizeof(u32);
9686 written_so_far += sizeof(u32);
9687 cmd_flags = 0;
9688 }
9689
9690 /* disable access to nvram interface */
9691 bnx2x_disable_nvram_access(bp);
9692 bnx2x_release_nvram_lock(bp);
9693
9694 return rc;
9695}
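/*
 * Flag sequencing above (as coded): MCPR_NVM_COMMAND_FIRST opens a
 * burst, MCPR_NVM_COMMAND_LAST closes it at the final dword or just
 * before an NVRAM_PAGE_SIZE boundary, and a dword starting exactly on a
 * page boundary opens a fresh burst - so a write never straddles a
 * flash page.
 */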
9696
9697static int bnx2x_set_eeprom(struct net_device *dev,
9698 struct ethtool_eeprom *eeprom, u8 *eebuf)
9699{
9700 struct bnx2x *bp = netdev_priv(dev);
9701 int port = BP_PORT(bp);
9702 int rc = 0;
a2fbb9ea 9703
9704 if (!netif_running(dev))
9705 return -EAGAIN;
9706
34f80b04 9707 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9708 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9709 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9710 eeprom->len, eeprom->len);
9711
9712 /* parameters already validated in ethtool_set_eeprom */
9713
9714 /* PHY eeprom can be accessed only by the PMF */
9715 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9716 !bp->port.pmf)
9717 return -EINVAL;
9718
9719 if (eeprom->magic == 0x50485950) {
9720 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9721 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9722
9723 bnx2x_acquire_phy_lock(bp);
9724 rc |= bnx2x_link_reset(&bp->link_params,
9725 &bp->link_vars, 0);
9726 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9727 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9728 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9729 MISC_REGISTERS_GPIO_HIGH, port);
9730 bnx2x_release_phy_lock(bp);
9731 bnx2x_link_report(bp);
9732
9733 } else if (eeprom->magic == 0x50485952) {
9734 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9735 if ((bp->state == BNX2X_STATE_OPEN) ||
9736 (bp->state == BNX2X_STATE_DISABLED)) {
4a37fb66 9737 bnx2x_acquire_phy_lock(bp);
9738 rc |= bnx2x_link_reset(&bp->link_params,
9739 &bp->link_vars, 1);
9740
9741 rc |= bnx2x_phy_init(&bp->link_params,
9742 &bp->link_vars);
4a37fb66 9743 bnx2x_release_phy_lock(bp);
9744 bnx2x_calc_fc_adv(bp);
9745 }
9746 } else if (eeprom->magic == 0x53985943) {
9747 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9748 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9749 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9750 u8 ext_phy_addr =
659bc5c4 9751 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9752
9753 /* DSP Remove Download Mode */
9754 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9755 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9756
9757 bnx2x_acquire_phy_lock(bp);
9758
9759 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9760
9761 /* wait 0.5 sec to allow it to run */
9762 msleep(500);
9763 bnx2x_ext_phy_hw_reset(bp, port);
9764 msleep(500);
9765 bnx2x_release_phy_lock(bp);
9766 }
9767 } else
c18487ee 9768 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9769
9770 return rc;
9771}
9772
9773static int bnx2x_get_coalesce(struct net_device *dev,
9774 struct ethtool_coalesce *coal)
9775{
9776 struct bnx2x *bp = netdev_priv(dev);
9777
9778 memset(coal, 0, sizeof(struct ethtool_coalesce));
9779
9780 coal->rx_coalesce_usecs = bp->rx_ticks;
9781 coal->tx_coalesce_usecs = bp->tx_ticks;
9782
9783 return 0;
9784}
9785
ca00392c 9786#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
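/*
 * 0xf0 * 12 = 2880 us: presumably 0xf0 is the largest tick count the HC
 * timer field can hold and 12 us is its tick period, so set_coalesce()
 * below clamps rx_ticks/tx_ticks to this value.
 */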
9787static int bnx2x_set_coalesce(struct net_device *dev,
9788 struct ethtool_coalesce *coal)
9789{
9790 struct bnx2x *bp = netdev_priv(dev);
9791
9792 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9793 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9794 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9795
9796 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9797 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9798 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9799
34f80b04 9800 if (netif_running(dev))
9801 bnx2x_update_coalesce(bp);
9802
9803 return 0;
9804}
9805
9806static void bnx2x_get_ringparam(struct net_device *dev,
9807 struct ethtool_ringparam *ering)
9808{
9809 struct bnx2x *bp = netdev_priv(dev);
9810
9811 ering->rx_max_pending = MAX_RX_AVAIL;
9812 ering->rx_mini_max_pending = 0;
9813 ering->rx_jumbo_max_pending = 0;
9814
9815 ering->rx_pending = bp->rx_ring_size;
9816 ering->rx_mini_pending = 0;
9817 ering->rx_jumbo_pending = 0;
9818
9819 ering->tx_max_pending = MAX_TX_AVAIL;
9820 ering->tx_pending = bp->tx_ring_size;
9821}
9822
9823static int bnx2x_set_ringparam(struct net_device *dev,
9824 struct ethtool_ringparam *ering)
9825{
9826 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9827 int rc = 0;
9828
9829 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9830 (ering->tx_pending > MAX_TX_AVAIL) ||
9831 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9832 return -EINVAL;
9833
9834 bp->rx_ring_size = ering->rx_pending;
9835 bp->tx_ring_size = ering->tx_pending;
9836
9837 if (netif_running(dev)) {
9838 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9839 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9840 }
9841
34f80b04 9842 return rc;
9843}
9844
9845static void bnx2x_get_pauseparam(struct net_device *dev,
9846 struct ethtool_pauseparam *epause)
9847{
9848 struct bnx2x *bp = netdev_priv(dev);
9849
9850 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9851 BNX2X_FLOW_CTRL_AUTO) &&
9852 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9853
9854 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9855 BNX2X_FLOW_CTRL_RX);
9856 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9857 BNX2X_FLOW_CTRL_TX);
9858
9859 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9860 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9861 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9862}
9863
9864static int bnx2x_set_pauseparam(struct net_device *dev,
9865 struct ethtool_pauseparam *epause)
9866{
9867 struct bnx2x *bp = netdev_priv(dev);
9868
9869 if (IS_E1HMF(bp))
9870 return 0;
9871
9872 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9873 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9874 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9875
c0700f90 9876 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9877
f1410647 9878 if (epause->rx_pause)
c0700f90 9879 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9880
f1410647 9881 if (epause->tx_pause)
c0700f90 9882 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9883
9884 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9885 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9886
c18487ee 9887 if (epause->autoneg) {
34f80b04 9888 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9889 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9890 return -EINVAL;
9891 }
a2fbb9ea 9892
c18487ee 9893 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9894 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9895 }
a2fbb9ea 9896
9897 DP(NETIF_MSG_LINK,
9898 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9899
9900 if (netif_running(dev)) {
bb2a0f7a 9901 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9902 bnx2x_link_set(bp);
9903 }
9904
9905 return 0;
9906}
9907
9908static int bnx2x_set_flags(struct net_device *dev, u32 data)
9909{
9910 struct bnx2x *bp = netdev_priv(dev);
9911 int changed = 0;
9912 int rc = 0;
9913
9914 /* TPA requires Rx CSUM offloading */
9915 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9916 if (!(dev->features & NETIF_F_LRO)) {
9917 dev->features |= NETIF_F_LRO;
9918 bp->flags |= TPA_ENABLE_FLAG;
9919 changed = 1;
9920 }
9921
9922 } else if (dev->features & NETIF_F_LRO) {
9923 dev->features &= ~NETIF_F_LRO;
9924 bp->flags &= ~TPA_ENABLE_FLAG;
9925 changed = 1;
9926 }
9927
9928 if (changed && netif_running(dev)) {
9929 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9930 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9931 }
9932
9933 return rc;
9934}
9935
9936static u32 bnx2x_get_rx_csum(struct net_device *dev)
9937{
9938 struct bnx2x *bp = netdev_priv(dev);
9939
9940 return bp->rx_csum;
9941}
9942
9943static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9944{
9945 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9946 int rc = 0;
9947
9948 bp->rx_csum = data;
9949
9950 /* Disable TPA when Rx CSUM is disabled; otherwise all
9951 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9952 if (!data) {
9953 u32 flags = ethtool_op_get_flags(dev);
9954
9955 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9956 }
9957
9958 return rc;
9959}
9960
9961static int bnx2x_set_tso(struct net_device *dev, u32 data)
9962{
755735eb 9963 if (data) {
a2fbb9ea 9964 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9965 dev->features |= NETIF_F_TSO6;
9966 } else {
a2fbb9ea 9967 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9968 dev->features &= ~NETIF_F_TSO6;
9969 }
9970
9971 return 0;
9972}
9973
f3c87cdd 9974static const struct {
9975 char string[ETH_GSTRING_LEN];
9976} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9977 { "register_test (offline)" },
9978 { "memory_test (offline)" },
9979 { "loopback_test (offline)" },
9980 { "nvram_test (online)" },
9981 { "interrupt_test (online)" },
9982 { "link_test (online)" },
d3d4f495 9983 { "idle check (online)" }
9984};
9985
9986static int bnx2x_test_registers(struct bnx2x *bp)
9987{
9988 int idx, i, rc = -ENODEV;
9989 u32 wr_val = 0;
9dabc424 9990 int port = BP_PORT(bp);
9991 static const struct {
9992 u32 offset0;
9993 u32 offset1;
9994 u32 mask;
9995 } reg_tbl[] = {
9996/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9997 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9998 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9999 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10000 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10001 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10002 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10003 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10004 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10005 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10006/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10007 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10008 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10009 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10010 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10011 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10012 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10013 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10014 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10015 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10016/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10017 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10018 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10019 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10020 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10021 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10022 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10023 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10024 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10025 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10026/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10027 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10028 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10029 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10030 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10031 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10032 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10033
10034 { 0xffffffff, 0, 0x00000000 }
10035 };
10036
10037 if (!netif_running(bp->dev))
10038 return rc;
10039
10040 /* Repeat the test twice:
10041 First by writing 0x00000000, second by writing 0xffffffff */
10042 for (idx = 0; idx < 2; idx++) {
10043
10044 switch (idx) {
10045 case 0:
10046 wr_val = 0;
10047 break;
10048 case 1:
10049 wr_val = 0xffffffff;
10050 break;
10051 }
10052
10053 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10054 u32 offset, mask, save_val, val;
10055
10056 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10057 mask = reg_tbl[i].mask;
10058
10059 save_val = REG_RD(bp, offset);
10060
10061 REG_WR(bp, offset, wr_val);
10062 val = REG_RD(bp, offset);
10063
10064 /* Restore the original register's value */
10065 REG_WR(bp, offset, save_val);
10066
10067 /* verify the value is as expected */
10068 if ((val & mask) != (wr_val & mask))
10069 goto test_reg_exit;
10070 }
10071 }
10072
10073 rc = 0;
10074
10075test_reg_exit:
10076 return rc;
10077}
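/*
 * Reading the reg_tbl entries above (example entry):
 * { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff } means the port-1
 * copy of the register lives 4 bytes after the port-0 one and only the
 * low 10 bits are writable, so the walking-0/walking-1 patterns are
 * compared under that mask after the original value is restored.
 */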
10078
10079static int bnx2x_test_memory(struct bnx2x *bp)
10080{
10081 int i, j, rc = -ENODEV;
10082 u32 val;
10083 static const struct {
10084 u32 offset;
10085 int size;
10086 } mem_tbl[] = {
10087 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10088 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10089 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10090 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10091 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10092 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10093 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10094
10095 { 0xffffffff, 0 }
10096 };
10097 static const struct {
10098 char *name;
10099 u32 offset;
10100 u32 e1_mask;
10101 u32 e1h_mask;
f3c87cdd 10102 } prty_tbl[] = {
10103 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10104 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10105 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10106 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10107 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10108 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10109
10110 { NULL, 0xffffffff, 0, 0 }
10111 };
10112
10113 if (!netif_running(bp->dev))
10114 return rc;
10115
10116 /* Go through all the memories */
10117 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10118 for (j = 0; j < mem_tbl[i].size; j++)
10119 REG_RD(bp, mem_tbl[i].offset + j*4);
10120
10121 /* Check the parity status */
10122 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10123 val = REG_RD(bp, prty_tbl[i].offset);
10124 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10125 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10126 DP(NETIF_MSG_HW,
10127 "%s is 0x%x\n", prty_tbl[i].name, val);
10128 goto test_mem_exit;
10129 }
10130 }
10131
10132 rc = 0;
10133
10134test_mem_exit:
10135 return rc;
10136}
10137
10138static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10139{
10140 int cnt = 1000;
10141
10142 if (link_up)
10143 while (bnx2x_link_test(bp) && cnt--)
10144 msleep(10);
10145}
10146
10147static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10148{
10149 unsigned int pkt_size, num_pkts, i;
10150 struct sk_buff *skb;
10151 unsigned char *packet;
10152 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10153 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10154 u16 tx_start_idx, tx_idx;
10155 u16 rx_start_idx, rx_idx;
ca00392c 10156 u16 pkt_prod, bd_prod;
f3c87cdd 10157 struct sw_tx_bd *tx_buf;
10158 struct eth_tx_start_bd *tx_start_bd;
10159 struct eth_tx_parse_bd *pbd = NULL;
10160 dma_addr_t mapping;
10161 union eth_rx_cqe *cqe;
10162 u8 cqe_fp_flags;
10163 struct sw_rx_bd *rx_buf;
10164 u16 len;
10165 int rc = -ENODEV;
10166
10167 /* check the loopback mode */
10168 switch (loopback_mode) {
10169 case BNX2X_PHY_LOOPBACK:
10170 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10171 return -EINVAL;
10172 break;
10173 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10174 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10175 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10176 break;
10177 default:
f3c87cdd 10178 return -EINVAL;
b5bf9068 10179 }
f3c87cdd 10180
10181 /* prepare the loopback packet */
10182 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10183 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10184 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10185 if (!skb) {
10186 rc = -ENOMEM;
10187 goto test_loopback_exit;
10188 }
10189 packet = skb_put(skb, pkt_size);
10190 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10191 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10192 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
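	/* ramp payload (i & 0xff) so the Rx side can verify each byte */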
10193 for (i = ETH_HLEN; i < pkt_size; i++)
10194 packet[i] = (unsigned char) (i & 0xff);
10195
b5bf9068 10196 /* send the loopback packet */
f3c87cdd 10197 num_pkts = 0;
10198 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10199 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10200
10201 pkt_prod = fp_tx->tx_pkt_prod++;
10202 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10203 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10204 tx_buf->skb = skb;
ca00392c 10205 tx_buf->flags = 0;
f3c87cdd 10206
10207 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10208 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10209 mapping = pci_map_single(bp->pdev, skb->data,
10210 skb_headlen(skb), PCI_DMA_TODEVICE);
10211 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10212 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10213 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10214 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10215 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10216 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10217 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10218 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10219
10220 /* turn on parsing and get a BD */
10221 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10222 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10223
10224 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10225
10226 wmb();
10227
10228 fp_tx->tx_db.data.prod += 2;
10229 barrier();
10230 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10231
10232 mmiowb();
10233
10234 num_pkts++;
ca00392c 10235 fp_tx->tx_bd_prod += 2; /* start + pbd */
10236 bp->dev->trans_start = jiffies;
10237
10238 udelay(100);
10239
ca00392c 10240 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10241 if (tx_idx != tx_start_idx + num_pkts)
10242 goto test_loopback_exit;
10243
ca00392c 10244 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10245 if (rx_idx != rx_start_idx + num_pkts)
10246 goto test_loopback_exit;
10247
ca00392c 10248 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10249 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10250 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10251 goto test_loopback_rx_exit;
10252
10253 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10254 if (len != pkt_size)
10255 goto test_loopback_rx_exit;
10256
ca00392c 10257 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10258 skb = rx_buf->skb;
10259 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10260 for (i = ETH_HLEN; i < pkt_size; i++)
10261 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10262 goto test_loopback_rx_exit;
10263
10264 rc = 0;
10265
10266test_loopback_rx_exit:
f3c87cdd 10267
10268 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10269 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10270 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10271 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10272
10273 /* Update producers */
10274 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10275 fp_rx->rx_sge_prod);
10276
10277test_loopback_exit:
10278 bp->link_params.loopback_mode = LOOPBACK_NONE;
10279
10280 return rc;
10281}
10282
10283static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10284{
b5bf9068 10285 int rc = 0, res;
10286
10287 if (!netif_running(bp->dev))
10288 return BNX2X_LOOPBACK_FAILED;
10289
f8ef6e44 10290 bnx2x_netif_stop(bp, 1);
3910c8ae 10291 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10292
10293 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10294 if (res) {
10295 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10296 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10297 }
10298
10299 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10300 if (res) {
10301 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10302 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10303 }
10304
3910c8ae 10305 bnx2x_release_phy_lock(bp);
10306 bnx2x_netif_start(bp);
10307
10308 return rc;
10309}
10310
10311#define CRC32_RESIDUAL 0xdebb20e3
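/* ether_crc_le() over a region that ends with its stored CRC32 yields this
 * constant residue, so every nvram_tbl region below can be verified without
 * keeping a per-region expected value. A sketch of the check for a single
 * region, using the same helpers as the function below:
 *
 *	rc = bnx2x_nvram_read(bp, 0x100, data, 0x350);
 *	if (!rc && (ether_crc_le(0x350, data) != CRC32_RESIDUAL))
 *		rc = -ENODEV;
 */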
10312
10313static int bnx2x_test_nvram(struct bnx2x *bp)
10314{
10315 static const struct {
10316 int offset;
10317 int size;
10318 } nvram_tbl[] = {
10319 { 0, 0x14 }, /* bootstrap */
10320 { 0x14, 0xec }, /* dir */
10321 { 0x100, 0x350 }, /* manuf_info */
10322 { 0x450, 0xf0 }, /* feature_info */
10323 { 0x640, 0x64 }, /* upgrade_key_info */
10324 { 0x6a4, 0x64 },
10325 { 0x708, 0x70 }, /* manuf_key_info */
10326 { 0x778, 0x70 },
10327 { 0, 0 }
10328 };
4781bfad 10329 __be32 buf[0x350 / 4];
10330 u8 *data = (u8 *)buf;
10331 int i, rc;
ab6ad5a4 10332 u32 magic, crc;
10333
10334 rc = bnx2x_nvram_read(bp, 0, data, 4);
10335 if (rc) {
f5372251 10336 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10337 goto test_nvram_exit;
10338 }
10339
10340 magic = be32_to_cpu(buf[0]);
10341 if (magic != 0x669955aa) {
10342 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10343 rc = -ENODEV;
10344 goto test_nvram_exit;
10345 }
10346
10347 for (i = 0; nvram_tbl[i].size; i++) {
10348
10349 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10350 nvram_tbl[i].size);
10351 if (rc) {
10352 DP(NETIF_MSG_PROBE,
f5372251 10353 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10354 goto test_nvram_exit;
10355 }
10356
10357 crc = ether_crc_le(nvram_tbl[i].size, data);
10358 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10359 DP(NETIF_MSG_PROBE,
ab6ad5a4 10360 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10361 rc = -ENODEV;
10362 goto test_nvram_exit;
10363 }
10364 }
10365
10366test_nvram_exit:
10367 return rc;
10368}
10369
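/* Interrupt test: post a benign SET_MAC ramrod and poll set_mac_pending
 * for up to 10 * 10ms; if the completion never clears it, the IRQ/slowpath
 * is considered broken and the test returns -ENODEV.
 */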
10370static int bnx2x_test_intr(struct bnx2x *bp)
10371{
10372 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10373 int i, rc;
10374
10375 if (!netif_running(bp->dev))
10376 return -ENODEV;
10377
8d9c5f34 10378 config->hdr.length = 0;
10379 if (CHIP_IS_E1(bp))
10380 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10381 else
10382 config->hdr.offset = BP_FUNC(bp);
0626b899 10383 config->hdr.client_id = bp->fp->cl_id;
10384 config->hdr.reserved1 = 0;
10385
10386 bp->set_mac_pending++;
10387 smp_wmb();
10388 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10389 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10390 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10391 if (rc == 0) {
10392 for (i = 0; i < 10; i++) {
10393 if (!bp->set_mac_pending)
10394 break;
e665bfda 10395 smp_rmb();
10396 msleep_interruptible(10);
10397 }
10398 if (i == 10)
10399 rc = -ENODEV;
10400 }
10401
10402 return rc;
10403}
10404
10405static void bnx2x_self_test(struct net_device *dev,
10406 struct ethtool_test *etest, u64 *buf)
10407{
10408 struct bnx2x *bp = netdev_priv(dev);
10409
10410 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10411
f3c87cdd 10412 if (!netif_running(dev))
a2fbb9ea 10413 return;
a2fbb9ea 10414
33471629 10415 /* offline tests are not supported in MF mode */
10416 if (IS_E1HMF(bp))
10417 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10418
10419 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10420 int port = BP_PORT(bp);
10421 u32 val;
10422 u8 link_up;
10423
10424 /* save current value of input enable for TX port IF */
10425 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10426 /* disable input for TX port IF */
10427 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10428
10429 link_up = bp->link_vars.link_up;
10430 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10431 bnx2x_nic_load(bp, LOAD_DIAG);
10432 /* wait until link state is restored */
10433 bnx2x_wait_for_link(bp, link_up);
10434
10435 if (bnx2x_test_registers(bp) != 0) {
10436 buf[0] = 1;
10437 etest->flags |= ETH_TEST_FL_FAILED;
10438 }
10439 if (bnx2x_test_memory(bp) != 0) {
10440 buf[1] = 1;
10441 etest->flags |= ETH_TEST_FL_FAILED;
10442 }
10443 buf[2] = bnx2x_test_loopback(bp, link_up);
10444 if (buf[2] != 0)
10445 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10446
f3c87cdd 10447 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10448
10449 /* restore input for TX port IF */
10450 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10451
10452 bnx2x_nic_load(bp, LOAD_NORMAL);
10453 /* wait until link state is restored */
10454 bnx2x_wait_for_link(bp, link_up);
10455 }
10456 if (bnx2x_test_nvram(bp) != 0) {
10457 buf[3] = 1;
10458 etest->flags |= ETH_TEST_FL_FAILED;
10459 }
10460 if (bnx2x_test_intr(bp) != 0) {
10461 buf[4] = 1;
10462 etest->flags |= ETH_TEST_FL_FAILED;
10463 }
10464 if (bp->port.pmf)
10465 if (bnx2x_link_test(bp) != 0) {
10466 buf[5] = 1;
10467 etest->flags |= ETH_TEST_FL_FAILED;
10468 }
10469
10470#ifdef BNX2X_EXTRA_DEBUG
10471 bnx2x_panic_dump(bp);
10472#endif
10473}
10474
10475static const struct {
10476 long offset;
10477 int size;
10478 u8 string[ETH_GSTRING_LEN];
10479} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10480/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10481 { Q_STATS_OFFSET32(error_bytes_received_hi),
10482 8, "[%d]: rx_error_bytes" },
10483 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10484 8, "[%d]: rx_ucast_packets" },
10485 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10486 8, "[%d]: rx_mcast_packets" },
10487 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10488 8, "[%d]: rx_bcast_packets" },
10489 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10490 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10491 4, "[%d]: rx_phy_ip_err_discards"},
10492 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10493 4, "[%d]: rx_skb_alloc_discard" },
10494 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10495
10496/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10497 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10498 8, "[%d]: tx_packets" }
10499};
10500
10501static const struct {
10502 long offset;
10503 int size;
10504 u32 flags;
10505#define STATS_FLAGS_PORT 1
10506#define STATS_FLAGS_FUNC 2
de832a55 10507#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10508 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10509} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10510/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10511 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10512 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10513 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10514 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10515 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10516 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10517 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10518 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10519 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10520 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10521 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10522 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10523 8, STATS_FLAGS_PORT, "rx_align_errors" },
10524 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10525 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10526 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10527 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10528/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10529 8, STATS_FLAGS_PORT, "rx_fragments" },
10530 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10531 8, STATS_FLAGS_PORT, "rx_jabbers" },
10532 { STATS_OFFSET32(no_buff_discard_hi),
10533 8, STATS_FLAGS_BOTH, "rx_discards" },
10534 { STATS_OFFSET32(mac_filter_discard),
10535 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10536 { STATS_OFFSET32(xxoverflow_discard),
10537 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10538 { STATS_OFFSET32(brb_drop_hi),
10539 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10540 { STATS_OFFSET32(brb_truncate_hi),
10541 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10542 { STATS_OFFSET32(pause_frames_received_hi),
10543 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10544 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10545 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10546 { STATS_OFFSET32(nig_timer_max),
10547 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10548/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10549 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10550 { STATS_OFFSET32(rx_skb_alloc_failed),
10551 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10552 { STATS_OFFSET32(hw_csum_err),
10553 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10554
10555 { STATS_OFFSET32(total_bytes_transmitted_hi),
10556 8, STATS_FLAGS_BOTH, "tx_bytes" },
10557 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10558 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10559 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10560 8, STATS_FLAGS_BOTH, "tx_packets" },
10561 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10562 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10563 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10564 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10565 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10566 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10567 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10568 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10569/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10570 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10571 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10572 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10573 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10574 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10575 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10576 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10577 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10578 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10579 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10580 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10581 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10582 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10583 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10584 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10585 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10586 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10587 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10588 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10589/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10590 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10591 { STATS_OFFSET32(pause_frames_sent_hi),
10592 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10593};
10594
10595#define IS_PORT_STAT(i) \
10596 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10597#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10598#define IS_E1HMF_MODE_STAT(bp) \
10599 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
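/* in E1H multi-function mode the port (and its counters) is shared by all
 * functions, so port statistics are hidden unless BNX2X_MSG_STATS is set */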
66e855f3 10600
10601static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10602{
10603 struct bnx2x *bp = netdev_priv(dev);
10604 int i, num_stats;
10605
10606 	switch (stringset) {
10607 case ETH_SS_STATS:
10608 if (is_multi(bp)) {
10609 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10610 if (!IS_E1HMF_MODE_STAT(bp))
10611 num_stats += BNX2X_NUM_STATS;
10612 } else {
10613 if (IS_E1HMF_MODE_STAT(bp)) {
10614 num_stats = 0;
10615 for (i = 0; i < BNX2X_NUM_STATS; i++)
10616 if (IS_FUNC_STAT(i))
10617 num_stats++;
10618 } else
10619 num_stats = BNX2X_NUM_STATS;
10620 }
10621 return num_stats;
10622
10623 case ETH_SS_TEST:
10624 return BNX2X_NUM_TESTS;
10625
10626 default:
10627 return -EINVAL;
10628 }
10629}
10630
10631static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10632{
bb2a0f7a 10633 struct bnx2x *bp = netdev_priv(dev);
de832a55 10634 int i, j, k;
bb2a0f7a 10635
10636 switch (stringset) {
10637 case ETH_SS_STATS:
10638 if (is_multi(bp)) {
10639 k = 0;
ca00392c 10640 for_each_rx_queue(bp, i) {
10641 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10642 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10643 bnx2x_q_stats_arr[j].string, i);
10644 k += BNX2X_NUM_Q_STATS;
10645 }
10646 if (IS_E1HMF_MODE_STAT(bp))
10647 break;
10648 for (j = 0; j < BNX2X_NUM_STATS; j++)
10649 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10650 bnx2x_stats_arr[j].string);
10651 } else {
10652 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10653 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10654 continue;
10655 strcpy(buf + j*ETH_GSTRING_LEN,
10656 bnx2x_stats_arr[i].string);
10657 j++;
10658 }
bb2a0f7a 10659 }
10660 break;
10661
10662 case ETH_SS_TEST:
10663 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10664 break;
10665 }
10666}
10667
10668static void bnx2x_get_ethtool_stats(struct net_device *dev,
10669 struct ethtool_stats *stats, u64 *buf)
10670{
10671 struct bnx2x *bp = netdev_priv(dev);
10672 u32 *hw_stats, *offset;
10673 int i, j, k;
bb2a0f7a 10674
10675 if (is_multi(bp)) {
10676 k = 0;
ca00392c 10677 for_each_rx_queue(bp, i) {
10678 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10679 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10680 if (bnx2x_q_stats_arr[j].size == 0) {
10681 /* skip this counter */
10682 buf[k + j] = 0;
10683 continue;
10684 }
10685 offset = (hw_stats +
10686 bnx2x_q_stats_arr[j].offset);
10687 if (bnx2x_q_stats_arr[j].size == 4) {
10688 /* 4-byte counter */
10689 buf[k + j] = (u64) *offset;
10690 continue;
10691 }
10692 /* 8-byte counter */
10693 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10694 }
10695 k += BNX2X_NUM_Q_STATS;
10696 }
10697 if (IS_E1HMF_MODE_STAT(bp))
10698 return;
10699 hw_stats = (u32 *)&bp->eth_stats;
10700 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10701 if (bnx2x_stats_arr[j].size == 0) {
10702 /* skip this counter */
10703 buf[k + j] = 0;
10704 continue;
10705 }
10706 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10707 if (bnx2x_stats_arr[j].size == 4) {
10708 /* 4-byte counter */
10709 buf[k + j] = (u64) *offset;
10710 continue;
10711 }
10712 /* 8-byte counter */
10713 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10714 }
10715 } else {
10716 hw_stats = (u32 *)&bp->eth_stats;
10717 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10718 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10719 continue;
10720 if (bnx2x_stats_arr[i].size == 0) {
10721 /* skip this counter */
10722 buf[j] = 0;
10723 j++;
10724 continue;
10725 }
10726 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10727 if (bnx2x_stats_arr[i].size == 4) {
10728 /* 4-byte counter */
10729 buf[j] = (u64) *offset;
10730 j++;
10731 continue;
10732 }
10733 /* 8-byte counter */
10734 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10735 j++;
a2fbb9ea 10736 }
10737 }
10738}
10739
10740static int bnx2x_phys_id(struct net_device *dev, u32 data)
10741{
10742 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10743 int port = BP_PORT(bp);
10744 int i;
10745
10746 if (!netif_running(dev))
10747 return 0;
10748
10749 if (!bp->port.pmf)
10750 return 0;
10751
10752 if (data == 0)
10753 data = 2;
10754
10755 for (i = 0; i < (data * 2); i++) {
c18487ee 10756 if ((i % 2) == 0)
34f80b04 10757 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10758 bp->link_params.hw_led_mode,
10759 bp->link_params.chip_id);
10760 else
34f80b04 10761 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10762 bp->link_params.hw_led_mode,
10763 bp->link_params.chip_id);
10764
a2fbb9ea
ET
10765 msleep_interruptible(500);
10766 if (signal_pending(current))
10767 break;
10768 }
10769
c18487ee 10770 if (bp->link_vars.link_up)
34f80b04 10771 bnx2x_set_led(bp, port, LED_MODE_OPER,
10772 bp->link_vars.line_speed,
10773 bp->link_params.hw_led_mode,
10774 bp->link_params.chip_id);
10775
10776 return 0;
10777}
10778
0fc0b732 10779static const struct ethtool_ops bnx2x_ethtool_ops = {
10780 .get_settings = bnx2x_get_settings,
10781 .set_settings = bnx2x_set_settings,
10782 .get_drvinfo = bnx2x_get_drvinfo,
10783 .get_regs_len = bnx2x_get_regs_len,
10784 .get_regs = bnx2x_get_regs,
10785 .get_wol = bnx2x_get_wol,
10786 .set_wol = bnx2x_set_wol,
10787 .get_msglevel = bnx2x_get_msglevel,
10788 .set_msglevel = bnx2x_set_msglevel,
10789 .nway_reset = bnx2x_nway_reset,
01e53298 10790 .get_link = bnx2x_get_link,
10791 .get_eeprom_len = bnx2x_get_eeprom_len,
10792 .get_eeprom = bnx2x_get_eeprom,
10793 .set_eeprom = bnx2x_set_eeprom,
10794 .get_coalesce = bnx2x_get_coalesce,
10795 .set_coalesce = bnx2x_set_coalesce,
10796 .get_ringparam = bnx2x_get_ringparam,
10797 .set_ringparam = bnx2x_set_ringparam,
10798 .get_pauseparam = bnx2x_get_pauseparam,
10799 .set_pauseparam = bnx2x_set_pauseparam,
10800 .get_rx_csum = bnx2x_get_rx_csum,
10801 .set_rx_csum = bnx2x_set_rx_csum,
10802 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10803 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10804 .set_flags = bnx2x_set_flags,
10805 .get_flags = ethtool_op_get_flags,
10806 .get_sg = ethtool_op_get_sg,
10807 .set_sg = ethtool_op_set_sg,
10808 .get_tso = ethtool_op_get_tso,
10809 .set_tso = bnx2x_set_tso,
7a9b2557 10810 .self_test = bnx2x_self_test,
15f0a394 10811 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10812 .get_strings = bnx2x_get_strings,
a2fbb9ea 10813 .phys_id = bnx2x_phys_id,
bb2a0f7a 10814 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10815};
10816
10817/* end of ethtool_ops */
10818
10819/****************************************************************************
10820* General service functions
10821****************************************************************************/
10822
10823static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10824{
10825 u16 pmcsr;
10826
10827 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10828
10829 switch (state) {
10830 case PCI_D0:
34f80b04 10831 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10832 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10833 PCI_PM_CTRL_PME_STATUS));
10834
10835 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10836 /* delay required during transition out of D3hot */
a2fbb9ea 10837 msleep(20);
34f80b04 10838 break;
a2fbb9ea 10839
10840 case PCI_D3hot:
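		/* program the PM state field to 3 (D3hot) */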
10841 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10842 pmcsr |= 3;
a2fbb9ea 10843
10844 if (bp->wol)
10845 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10846
10847 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10848 pmcsr);
a2fbb9ea 10849
10850 /* No more memory access after this point until
10851 * device is brought back to D0.
10852 */
10853 break;
10854
10855 default:
10856 return -EINVAL;
10857 }
10858 return 0;
10859}
10860
10861static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10862{
10863 u16 rx_cons_sb;
10864
10865 /* Tell compiler that status block fields can change */
10866 barrier();
10867 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
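	/* the last entry of each RCQ page is a "next page" pointer rather
	 * than a completion; step the HW index over it so it stays
	 * comparable with rx_comp_cons */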
10868 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10869 rx_cons_sb++;
10870 return (fp->rx_comp_cons != rx_cons_sb);
10871}
10872
10873/*
10874 * net_device service functions
10875 */
10876
10877static int bnx2x_poll(struct napi_struct *napi, int budget)
10878{
10879 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10880 napi);
10881 struct bnx2x *bp = fp->bp;
10882 int work_done = 0;
10883
10884#ifdef BNX2X_STOP_ON_ERROR
10885 if (unlikely(bp->panic))
34f80b04 10886 goto poll_panic;
10887#endif
10888
10889 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10890 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10891
10892 bnx2x_update_fpsb_idx(fp);
10893
8534f32c 10894 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10895 work_done = bnx2x_rx_int(fp, budget);
356e2385 10896
10897 /* must not complete if we consumed full budget */
10898 if (work_done >= budget)
10899 goto poll_again;
10900 }
a2fbb9ea 10901
ca00392c 10902 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 10903 * ensure that status block indices have been actually read
ca00392c 10904 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 10905 * so that we won't write the "newer" value of the status block to IGU
ca00392c 10906 * (if there was a DMA right after bnx2x_has_rx_work and
10907 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10908 * may be postponed to right before bnx2x_ack_sb). In this case
10909 * there will never be another interrupt until there is another update
10910 * of the status block, while there is still unhandled work.
10911 */
10912 rmb();
a2fbb9ea 10913
ca00392c 10914 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10915#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10916poll_panic:
a2fbb9ea 10917#endif
288379f0 10918 napi_complete(napi);
a2fbb9ea 10919
0626b899 10920 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10921 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10922 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10923 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10924 }
356e2385 10925
8534f32c 10926poll_again:
10927 return work_done;
10928}
10929
10930
10931/* we split the first BD into headers and data BDs
33471629 10932 * to ease the pain of our fellow microcode engineers
10933 * we use one mapping for both BDs
10934 * So far this has only been observed to happen
10935 * in Other Operating Systems(TM)
10936 */
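/* The split below leaves the first BD covering only the hlen header bytes
 * and adds a data BD that points hlen bytes into the same DMA mapping for
 * the remaining (old_len - hlen) payload bytes.
 */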
10937static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10938 struct bnx2x_fastpath *fp,
10939 struct sw_tx_bd *tx_buf,
10940 struct eth_tx_start_bd **tx_bd, u16 hlen,
10941 u16 bd_prod, int nbd)
10942{
ca00392c 10943 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10944 struct eth_tx_bd *d_tx_bd;
10945 dma_addr_t mapping;
10946 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10947
10948 /* first fix first BD */
10949 h_tx_bd->nbd = cpu_to_le16(nbd);
10950 h_tx_bd->nbytes = cpu_to_le16(hlen);
10951
10952 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10953 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10954 h_tx_bd->addr_lo, h_tx_bd->nbd);
10955
10956 /* now get a new data BD
10957 * (after the pbd) and fill it */
10958 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10959 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10960
10961 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10962 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10963
10964 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10965 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10966 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10967
10968 /* this marks the BD as one that has no individual mapping */
10969 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10970
10971 DP(NETIF_MSG_TX_QUEUED,
10972 "TSO split data size is %d (%x:%x)\n",
10973 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10974
10975 /* update tx_bd */
10976 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10977
10978 return bd_prod;
10979}
10980
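/* The stack may have computed the partial checksum from a point before or
 * after the transport header; "fix" is that signed byte offset, so the
 * checksum of the skipped span is folded out of (or into) the sum until it
 * covers exactly the range the HW expects, then byte-swapped for the BD.
 */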
10981static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10982{
10983 if (fix > 0)
10984 csum = (u16) ~csum_fold(csum_sub(csum,
10985 csum_partial(t_header - fix, fix, 0)));
10986
10987 else if (fix < 0)
10988 csum = (u16) ~csum_fold(csum_add(csum,
10989 csum_partial(t_header, -fix, 0)));
10990
10991 return swab16(csum);
10992}
10993
10994static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10995{
10996 u32 rc;
10997
10998 if (skb->ip_summed != CHECKSUM_PARTIAL)
10999 rc = XMIT_PLAIN;
11000
11001 else {
4781bfad 11002 if (skb->protocol == htons(ETH_P_IPV6)) {
11003 rc = XMIT_CSUM_V6;
11004 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11005 rc |= XMIT_CSUM_TCP;
11006
11007 } else {
11008 rc = XMIT_CSUM_V4;
11009 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11010 rc |= XMIT_CSUM_TCP;
11011 }
11012 }
11013
11014 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11015 rc |= XMIT_GSO_V4;
11016
11017 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11018 rc |= XMIT_GSO_V6;
11019
11020 return rc;
11021}
11022
632da4d6 11023#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11024/* check if the packet requires linearization (it is too fragmented);
11025 no need to check fragmentation if page size > 8K (there will be no
11026 violation of FW restrictions) */
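/* A worked reading of the window rule enforced below: the FW fetches at
 * most MAX_FETCH_BD BDs per packet, three of which are overhead, so every
 * wnd_size = MAX_FETCH_BD - 3 consecutive data BDs must together carry at
 * least gso_size bytes; if any window falls short, a single MSS-sized
 * frame could span more BDs than the FW accepts and the skb must be
 * linearized first.
 */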
11027static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11028 u32 xmit_type)
11029{
11030 int to_copy = 0;
11031 int hlen = 0;
11032 int first_bd_sz = 0;
11033
11034 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11035 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11036
11037 if (xmit_type & XMIT_GSO) {
11038 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11039 /* Check if LSO packet needs to be copied:
11040 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11041 int wnd_size = MAX_FETCH_BD - 3;
33471629 11042 /* Number of windows to check */
11043 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11044 int wnd_idx = 0;
11045 int frag_idx = 0;
11046 u32 wnd_sum = 0;
11047
11048 /* Headers length */
11049 hlen = (int)(skb_transport_header(skb) - skb->data) +
11050 tcp_hdrlen(skb);
11051
11052 /* Amount of data (w/o headers) on linear part of SKB*/
11053 first_bd_sz = skb_headlen(skb) - hlen;
11054
11055 wnd_sum = first_bd_sz;
11056
11057 /* Calculate the first sum - it's special */
11058 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11059 wnd_sum +=
11060 skb_shinfo(skb)->frags[frag_idx].size;
11061
11062 /* If there was data on linear skb data - check it */
11063 if (first_bd_sz > 0) {
11064 if (unlikely(wnd_sum < lso_mss)) {
11065 to_copy = 1;
11066 goto exit_lbl;
11067 }
11068
11069 wnd_sum -= first_bd_sz;
11070 }
11071
11072 /* Others are easier: run through the frag list and
11073 check all windows */
11074 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11075 wnd_sum +=
11076 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11077
11078 if (unlikely(wnd_sum < lso_mss)) {
11079 to_copy = 1;
11080 break;
11081 }
11082 wnd_sum -=
11083 skb_shinfo(skb)->frags[wnd_idx].size;
11084 }
11085 } else {
11086 			/* a non-LSO packet that is too fragmented should
11087 			   always be linearized */
11088 to_copy = 1;
11089 }
11090 }
11091
11092exit_lbl:
11093 if (unlikely(to_copy))
11094 DP(NETIF_MSG_TX_QUEUED,
11095 "Linearization IS REQUIRED for %s packet. "
11096 "num_frags %d hlen %d first_bd_sz %d\n",
11097 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11098 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11099
11100 return to_copy;
11101}
632da4d6 11102#endif
11103
11104/* called with netif_tx_lock
a2fbb9ea 11105 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11106 * netif_wake_queue()
a2fbb9ea 11107 */
61357325 11108static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11109{
11110 struct bnx2x *bp = netdev_priv(dev);
ca00392c 11111 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 11112 struct netdev_queue *txq;
a2fbb9ea 11113 struct sw_tx_bd *tx_buf;
11114 struct eth_tx_start_bd *tx_start_bd;
11115 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11116 struct eth_tx_parse_bd *pbd = NULL;
11117 u16 pkt_prod, bd_prod;
755735eb 11118 int nbd, fp_index;
a2fbb9ea 11119 dma_addr_t mapping;
755735eb 11120 u32 xmit_type = bnx2x_xmit_type(bp, skb);
11121 int i;
11122 u8 hlen = 0;
ca00392c 11123 __le16 pkt_size = 0;
11124
11125#ifdef BNX2X_STOP_ON_ERROR
11126 if (unlikely(bp->panic))
11127 return NETDEV_TX_BUSY;
11128#endif
11129
11130 fp_index = skb_get_queue_mapping(skb);
11131 txq = netdev_get_tx_queue(dev, fp_index);
11132
11133 fp = &bp->fp[fp_index + bp->num_rx_queues];
11134 fp_stat = &bp->fp[fp_index];
755735eb 11135
231fd58a 11136 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 11137 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 11138 netif_tx_stop_queue(txq);
11139 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11140 return NETDEV_TX_BUSY;
11141 }
11142
11143 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11144 " gso type %x xmit_type %x\n",
11145 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11146 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11147
632da4d6 11148#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11149 	/* First, check if we need to linearize the skb (due to FW
11150 	   restrictions). No need to check fragmentation if page size > 8K
11151 	   (there will be no violation of FW restrictions) */
11152 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11153 /* Statistics of linearization */
11154 bp->lin_cnt++;
11155 if (skb_linearize(skb) != 0) {
11156 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11157 "silently dropping this SKB\n");
11158 dev_kfree_skb_any(skb);
da5a662a 11159 return NETDEV_TX_OK;
11160 }
11161 }
632da4d6 11162#endif
755735eb 11163
a2fbb9ea 11164 /*
755735eb 11165 Please read carefully. First we use one BD which we mark as start,
ca00392c 11166 then we have a parsing info BD (used for TSO or xsum),
755735eb 11167 and only then we have the rest of the TSO BDs.
11168 (don't forget to mark the last one as last,
11169 and to unmap only AFTER you write to the BD ...)
755735eb 11170 	And above all, all pbd sizes are in words - NOT DWORDS!
11171 */
11172
11173 pkt_prod = fp->tx_pkt_prod++;
755735eb 11174 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11175
755735eb 11176 /* get a tx_buf and first BD */
a2fbb9ea 11177 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11178 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11179
11180 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11181 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11182 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11183 /* header nbd */
ca00392c 11184 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11185
11186 /* remember the first BD of the packet */
11187 tx_buf->first_bd = fp->tx_bd_prod;
11188 tx_buf->skb = skb;
ca00392c 11189 tx_buf->flags = 0;
11190
11191 DP(NETIF_MSG_TX_QUEUED,
11192 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11193 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11194
11195#ifdef BCM_VLAN
11196 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11197 (bp->flags & HW_VLAN_TX_FLAG)) {
11198 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11199 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11200 } else
0c6671b0 11201#endif
ca00392c 11202 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11203
11204 /* turn on parsing and get a BD */
11205 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11206 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11207
ca00392c 11208 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11209
11210 if (xmit_type & XMIT_CSUM) {
ca00392c 11211 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11212
11213 /* for now NS flag is not used in Linux */
11214 pbd->global_data =
11215 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11216 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11217
11218 pbd->ip_hlen = (skb_transport_header(skb) -
11219 skb_network_header(skb)) / 2;
11220
11221 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11222
755735eb 11223 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11224 hlen = hlen*2;
a2fbb9ea 11225
ca00392c 11226 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11227
11228 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11229 tx_start_bd->bd_flags.as_bitfield |=
11230 ETH_TX_BD_FLAGS_IP_CSUM;
11231 else
11232 tx_start_bd->bd_flags.as_bitfield |=
11233 ETH_TX_BD_FLAGS_IPV6;
11234
11235 if (xmit_type & XMIT_CSUM_TCP) {
11236 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11237
11238 } else {
11239 s8 fix = SKB_CS_OFF(skb); /* signed! */
11240
ca00392c 11241 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11242
755735eb 11243 DP(NETIF_MSG_TX_QUEUED,
11244 "hlen %d fix %d csum before fix %x\n",
11245 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11246
11247 /* HW bug: fixup the CSUM */
11248 pbd->tcp_pseudo_csum =
11249 bnx2x_csum_fix(skb_transport_header(skb),
11250 SKB_CS(skb), fix);
11251
11252 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11253 pbd->tcp_pseudo_csum);
11254 }
11255 }
11256
11257 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11258 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11259
11260 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11261 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11262 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11263 tx_start_bd->nbd = cpu_to_le16(nbd);
11264 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11265 pkt_size = tx_start_bd->nbytes;
11266
11267 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11268 " nbytes %d flags %x vlan %x\n",
11269 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11270 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11271 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11272
755735eb 11273 if (xmit_type & XMIT_GSO) {
11274
11275 DP(NETIF_MSG_TX_QUEUED,
11276 "TSO packet len %d hlen %d total len %d tso size %d\n",
11277 skb->len, hlen, skb_headlen(skb),
11278 skb_shinfo(skb)->gso_size);
11279
ca00392c 11280 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11281
755735eb 11282 if (unlikely(skb_headlen(skb) > hlen))
11283 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11284 hlen, bd_prod, ++nbd);
11285
11286 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11287 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11288 pbd->tcp_flags = pbd_tcp_flags(skb);
11289
11290 if (xmit_type & XMIT_GSO_V4) {
11291 pbd->ip_id = swab16(ip_hdr(skb)->id);
11292 pbd->tcp_pseudo_csum =
11293 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11294 ip_hdr(skb)->daddr,
11295 0, IPPROTO_TCP, 0));
11296
11297 } else
11298 pbd->tcp_pseudo_csum =
11299 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11300 &ipv6_hdr(skb)->daddr,
11301 0, IPPROTO_TCP, 0));
11302
11303 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11304 }
ca00392c 11305 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11306
11307 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11308 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11309
755735eb 11310 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11311 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11312 if (total_pkt_bd == NULL)
11313 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11314
11315 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11316 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11317
11318 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11319 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11320 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11321 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11322
755735eb 11323 DP(NETIF_MSG_TX_QUEUED,
11324 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11325 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11326 le16_to_cpu(tx_data_bd->nbytes));
11327 }
11328
ca00392c 11329 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11330
11331 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11332
755735eb 11333 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11334 * if the packet contains or ends with it
11335 */
11336 if (TX_BD_POFF(bd_prod) < nbd)
11337 nbd++;
11338
11339 if (total_pkt_bd != NULL)
11340 total_pkt_bd->total_pkt_bytes = pkt_size;
11341
11342 if (pbd)
11343 DP(NETIF_MSG_TX_QUEUED,
11344 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11345 " tcp_flags %x xsum %x seq %u hlen %u\n",
11346 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11347 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11348 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11349
755735eb 11350 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11351
11352 /*
11353 * Make sure that the BD data is updated before updating the producer
11354 * since FW might read the BD right after the producer is updated.
11355 * This is only applicable for weak-ordered memory model archs such
11356 * as IA-64. The following barrier is also mandatory since FW will
11357 	 * assume packets must have BDs.
11358 */
11359 wmb();
11360
11361 fp->tx_db.data.prod += nbd;
11362 barrier();
11363 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11364
11365 mmiowb();
11366
755735eb 11367 fp->tx_bd_prod += nbd;
11368
11369 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11370 netif_tx_stop_queue(txq);
11371 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11372 if we put Tx into XOFF state. */
11373 smp_mb();
ca00392c 11374 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11375 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11376 netif_tx_wake_queue(txq);
a2fbb9ea 11377 }
ca00392c 11378 fp_stat->tx_pkt++;
11379
11380 return NETDEV_TX_OK;
11381}
11382
bb2a0f7a 11383/* called with rtnl_lock */
11384static int bnx2x_open(struct net_device *dev)
11385{
11386 struct bnx2x *bp = netdev_priv(dev);
11387
11388 netif_carrier_off(dev);
11389
11390 bnx2x_set_power_state(bp, PCI_D0);
11391
bb2a0f7a 11392 return bnx2x_nic_load(bp, LOAD_OPEN);
11393}
11394
bb2a0f7a 11395/* called with rtnl_lock */
11396static int bnx2x_close(struct net_device *dev)
11397{
11398 struct bnx2x *bp = netdev_priv(dev);
11399
11400 /* Unload the driver, release IRQs */
11401 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11402 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11403 if (!CHIP_REV_IS_SLOW(bp))
11404 bnx2x_set_power_state(bp, PCI_D3hot);
11405
11406 return 0;
11407}
11408
f5372251 11409/* called with netif_tx_lock from dev_mcast.c */
11410static void bnx2x_set_rx_mode(struct net_device *dev)
11411{
11412 struct bnx2x *bp = netdev_priv(dev);
11413 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11414 int port = BP_PORT(bp);
11415
11416 if (bp->state != BNX2X_STATE_OPEN) {
11417 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11418 return;
11419 }
11420
11421 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11422
11423 if (dev->flags & IFF_PROMISC)
11424 rx_mode = BNX2X_RX_MODE_PROMISC;
11425
11426 else if ((dev->flags & IFF_ALLMULTI) ||
11427 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11428 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11429
11430 else { /* some multicasts */
11431 if (CHIP_IS_E1(bp)) {
11432 int i, old, offset;
11433 struct dev_mc_list *mclist;
11434 struct mac_configuration_cmd *config =
11435 bnx2x_sp(bp, mcast_config);
11436
11437 for (i = 0, mclist = dev->mc_list;
11438 mclist && (i < dev->mc_count);
11439 i++, mclist = mclist->next) {
11440
11441 config->config_table[i].
11442 cam_entry.msb_mac_addr =
11443 swab16(*(u16 *)&mclist->dmi_addr[0]);
11444 config->config_table[i].
11445 cam_entry.middle_mac_addr =
11446 swab16(*(u16 *)&mclist->dmi_addr[2]);
11447 config->config_table[i].
11448 cam_entry.lsb_mac_addr =
11449 swab16(*(u16 *)&mclist->dmi_addr[4]);
11450 config->config_table[i].cam_entry.flags =
11451 cpu_to_le16(port);
11452 config->config_table[i].
11453 target_table_entry.flags = 0;
11454 config->config_table[i].target_table_entry.
11455 clients_bit_vector =
11456 cpu_to_le32(1 << BP_L_ID(bp));
11457 config->config_table[i].
11458 target_table_entry.vlan_id = 0;
11459
11460 DP(NETIF_MSG_IFUP,
11461 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11462 config->config_table[i].
11463 cam_entry.msb_mac_addr,
11464 config->config_table[i].
11465 cam_entry.middle_mac_addr,
11466 config->config_table[i].
11467 cam_entry.lsb_mac_addr);
11468 }
8d9c5f34 11469 old = config->hdr.length;
11470 if (old > i) {
11471 for (; i < old; i++) {
11472 if (CAM_IS_INVALID(config->
11473 config_table[i])) {
af246401 11474 /* already invalidated */
11475 break;
11476 }
11477 /* invalidate */
11478 CAM_INVALIDATE(config->
11479 config_table[i]);
11480 }
11481 }
11482
11483 if (CHIP_REV_IS_SLOW(bp))
11484 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11485 else
11486 offset = BNX2X_MAX_MULTICAST*(1 + port);
11487
8d9c5f34 11488 config->hdr.length = i;
34f80b04 11489 config->hdr.offset = offset;
8d9c5f34 11490 config->hdr.client_id = bp->fp->cl_id;
11491 config->hdr.reserved1 = 0;
11492
11493 bp->set_mac_pending++;
11494 smp_wmb();
11495
11496 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11497 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11498 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11499 0);
11500 } else { /* E1H */
11501 /* Accept one or more multicasts */
11502 struct dev_mc_list *mclist;
11503 u32 mc_filter[MC_HASH_SIZE];
11504 u32 crc, bit, regidx;
11505 int i;
11506
11507 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11508
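			/* hash each address with CRC32c: the top CRC byte
			 * picks a filter bit, bit >> 5 selects the register
			 * and the low five bits the position within it */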
11509 for (i = 0, mclist = dev->mc_list;
11510 mclist && (i < dev->mc_count);
11511 i++, mclist = mclist->next) {
11512
11513 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11514 mclist->dmi_addr);
11515
11516 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11517 bit = (crc >> 24) & 0xff;
11518 regidx = bit >> 5;
11519 bit &= 0x1f;
11520 mc_filter[regidx] |= (1 << bit);
11521 }
11522
11523 for (i = 0; i < MC_HASH_SIZE; i++)
11524 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11525 mc_filter[i]);
11526 }
11527 }
11528
11529 bp->rx_mode = rx_mode;
11530 bnx2x_set_storm_rx_mode(bp);
11531}
11532
11533/* called with rtnl_lock */
11534static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11535{
11536 struct sockaddr *addr = p;
11537 struct bnx2x *bp = netdev_priv(dev);
11538
34f80b04 11539 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11540 return -EINVAL;
11541
11542 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11543 if (netif_running(dev)) {
11544 if (CHIP_IS_E1(bp))
e665bfda 11545 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11546 else
e665bfda 11547 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11548 }
11549
11550 return 0;
11551}
11552
c18487ee 11553/* called with rtnl_lock */
11554static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11555 int devad, u16 addr)
a2fbb9ea 11556{
11557 struct bnx2x *bp = netdev_priv(netdev);
11558 u16 value;
11559 int rc;
11560 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11561
11562 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11563 prtad, devad, addr);
a2fbb9ea 11564
11565 if (prtad != bp->mdio.prtad) {
11566 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11567 prtad, bp->mdio.prtad);
11568 return -EINVAL;
11569 }
11570
11571 /* The HW expects different devad if CL22 is used */
11572 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11573
11574 bnx2x_acquire_phy_lock(bp);
11575 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11576 devad, addr, &value);
11577 bnx2x_release_phy_lock(bp);
11578 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11579
11580 if (!rc)
11581 rc = value;
11582 return rc;
11583}
a2fbb9ea 11584
11585/* called with rtnl_lock */
11586static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11587 u16 addr, u16 value)
11588{
11589 struct bnx2x *bp = netdev_priv(netdev);
11590 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11591 int rc;
11592
11593 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11594 " value 0x%x\n", prtad, devad, addr, value);
11595
11596 if (prtad != bp->mdio.prtad) {
11597 		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11598 prtad, bp->mdio.prtad);
11599 return -EINVAL;
11600 }
11601
11602 /* The HW expects different devad if CL22 is used */
11603 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11604
11605 bnx2x_acquire_phy_lock(bp);
11606 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11607 devad, addr, value);
11608 bnx2x_release_phy_lock(bp);
11609 return rc;
11610}
c18487ee 11611
11612/* called with rtnl_lock */
11613static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11614{
11615 struct bnx2x *bp = netdev_priv(dev);
11616 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11617
11618 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11619 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11620
11621 if (!netif_running(dev))
11622 return -EAGAIN;
11623
11624 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11625}
11626
34f80b04 11627/* called with rtnl_lock */
11628static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11629{
11630 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11631 int rc = 0;
11632
11633 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11634 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11635 return -EINVAL;
11636
11637 /* This does not race with packet allocation
c14423fe 11638 * because the actual alloc size is
11639 * only updated as part of load
11640 */
11641 dev->mtu = new_mtu;
11642
11643 if (netif_running(dev)) {
11644 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11645 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11646 }
11647
11648 return rc;
11649}
11650
11651static void bnx2x_tx_timeout(struct net_device *dev)
11652{
11653 struct bnx2x *bp = netdev_priv(dev);
11654
11655#ifdef BNX2X_STOP_ON_ERROR
11656 if (!bp->panic)
11657 bnx2x_panic();
11658#endif
11659 /* This allows the netif to be shutdown gracefully before resetting */
11660 schedule_work(&bp->reset_task);
11661}
11662
11663#ifdef BCM_VLAN
34f80b04 11664/* called with rtnl_lock */
11665static void bnx2x_vlan_rx_register(struct net_device *dev,
11666 struct vlan_group *vlgrp)
11667{
11668 struct bnx2x *bp = netdev_priv(dev);
11669
11670 bp->vlgrp = vlgrp;
11671
11672 /* Set flags according to the required capabilities */
11673 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11674
11675 if (dev->features & NETIF_F_HW_VLAN_TX)
11676 bp->flags |= HW_VLAN_TX_FLAG;
11677
11678 if (dev->features & NETIF_F_HW_VLAN_RX)
11679 bp->flags |= HW_VLAN_RX_FLAG;
11680
a2fbb9ea 11681 if (netif_running(dev))
49d66772 11682 bnx2x_set_client_config(bp);
a2fbb9ea 11683}
34f80b04 11684
11685#endif
11686
11687#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11688static void poll_bnx2x(struct net_device *dev)
11689{
11690 struct bnx2x *bp = netdev_priv(dev);
11691
11692 disable_irq(bp->pdev->irq);
11693 bnx2x_interrupt(bp->pdev->irq, dev);
11694 enable_irq(bp->pdev->irq);
11695}
11696#endif
11697
11698static const struct net_device_ops bnx2x_netdev_ops = {
11699 .ndo_open = bnx2x_open,
11700 .ndo_stop = bnx2x_close,
11701 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11702 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11703 .ndo_set_mac_address = bnx2x_change_mac_addr,
11704 .ndo_validate_addr = eth_validate_addr,
11705 .ndo_do_ioctl = bnx2x_ioctl,
11706 .ndo_change_mtu = bnx2x_change_mtu,
11707 .ndo_tx_timeout = bnx2x_tx_timeout,
11708#ifdef BCM_VLAN
11709 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11710#endif
11711#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11712 .ndo_poll_controller = poll_bnx2x,
11713#endif
11714};
11715
11716static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11717 struct net_device *dev)
11718{
11719 struct bnx2x *bp;
11720 int rc;
11721
11722 SET_NETDEV_DEV(dev, &pdev->dev);
11723 bp = netdev_priv(dev);
11724
11725 bp->dev = dev;
11726 bp->pdev = pdev;
a2fbb9ea 11727 bp->flags = 0;
34f80b04 11728 bp->func = PCI_FUNC(pdev->devfn);
11729
11730 rc = pci_enable_device(pdev);
11731 if (rc) {
11732 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11733 goto err_out;
11734 }
11735
11736 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11737 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11738 " aborting\n");
11739 rc = -ENODEV;
11740 goto err_out_disable;
11741 }
11742
11743 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11744 printk(KERN_ERR PFX "Cannot find second PCI device"
11745 " base address, aborting\n");
11746 rc = -ENODEV;
11747 goto err_out_disable;
11748 }
11749
11750 if (atomic_read(&pdev->enable_cnt) == 1) {
11751 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11752 if (rc) {
11753 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11754 " aborting\n");
11755 goto err_out_disable;
11756 }
a2fbb9ea 11757
11758 pci_set_master(pdev);
11759 pci_save_state(pdev);
11760 }
11761
11762 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11763 if (bp->pm_cap == 0) {
11764 printk(KERN_ERR PFX "Cannot find power management"
11765 " capability, aborting\n");
11766 rc = -EIO;
11767 goto err_out_release;
11768 }
11769
11770 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11771 if (bp->pcie_cap == 0) {
11772 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11773 " aborting\n");
11774 rc = -EIO;
11775 goto err_out_release;
11776 }
11777
6a35528a 11778 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11779 bp->flags |= USING_DAC_FLAG;
6a35528a 11780 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11781 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11782 " failed, aborting\n");
11783 rc = -EIO;
11784 goto err_out_release;
11785 }
11786
284901a9 11787 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11788 printk(KERN_ERR PFX "System does not support DMA,"
11789 " aborting\n");
11790 rc = -EIO;
11791 goto err_out_release;
11792 }
11793
11794 dev->mem_start = pci_resource_start(pdev, 0);
11795 dev->base_addr = dev->mem_start;
11796 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11797
11798 dev->irq = pdev->irq;
11799
275f165f 11800 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11801 if (!bp->regview) {
11802 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11803 rc = -ENOMEM;
11804 goto err_out_release;
11805 }
11806
34f80b04
EG
11807 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11808 min_t(u64, BNX2X_DB_SIZE,
11809 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11810 if (!bp->doorbells) {
11811 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11812 rc = -ENOMEM;
11813 goto err_out_unmap;
11814 }
11815
11816 bnx2x_set_power_state(bp, PCI_D0);
11817
34f80b04
EG
11818 /* clean indirect addresses */
11819 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11820 PCICFG_VENDOR_ID_OFFSET);
11821 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11822 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11823 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11824 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11825
34f80b04 11826 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11827
c64213cd 11828 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11829 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11830 dev->features |= NETIF_F_SG;
11831 dev->features |= NETIF_F_HW_CSUM;
11832 if (bp->flags & USING_DAC_FLAG)
11833 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11834 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11835 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11836#ifdef BCM_VLAN
11837 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11838 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11839
11840 dev->vlan_features |= NETIF_F_SG;
11841 dev->vlan_features |= NETIF_F_HW_CSUM;
11842 if (bp->flags & USING_DAC_FLAG)
11843 dev->vlan_features |= NETIF_F_HIGHDMA;
11844 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11845 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11846#endif
a2fbb9ea 11847
01cd4528
EG
11848 /* get_port_hwinfo() will set prtad and mmds properly */
11849 bp->mdio.prtad = MDIO_PRTAD_NONE;
11850 bp->mdio.mmds = 0;
11851 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11852 bp->mdio.dev = dev;
11853 bp->mdio.mdio_read = bnx2x_mdio_read;
11854 bp->mdio.mdio_write = bnx2x_mdio_write;
11855
a2fbb9ea
ET
11856 return 0;
11857
11858err_out_unmap:
11859 if (bp->regview) {
11860 iounmap(bp->regview);
11861 bp->regview = NULL;
11862 }
a2fbb9ea
ET
11863 if (bp->doorbells) {
11864 iounmap(bp->doorbells);
11865 bp->doorbells = NULL;
11866 }
11867
11868err_out_release:
34f80b04
EG
11869 if (atomic_read(&pdev->enable_cnt) == 1)
11870 pci_release_regions(pdev);
a2fbb9ea
ET
11871
11872err_out_disable:
11873 pci_disable_device(pdev);
11874 pci_set_drvdata(pdev, NULL);
11875
11876err_out:
11877 return rc;
11878}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* returned speed: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
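/*
 * Illustrative decode (register value hypothetical): if the width field
 * yields 8 and the speed field yields 2, the caller in bnx2x_init_one()
 * prints "PCI-E x8 5GHz (Gen2)"; a speed of 1 selects the "2.5GHz"
 * (Gen1) banner instead.
 */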

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
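/*
 * Rough picture of the firmware file layout implied by the checks above
 * (section order illustrative; the authoritative definition is struct
 * bnx2x_fw_file_hdr in bnx2x_fw_file_hdr.h):
 *
 *	+--------------------------------------+  offset 0
 *	| header: array of {offset, len} pairs |  one per section
 *	+--------------------------------------+
 *	| init_ops | init_ops_offsets | ...    |  each within firmware->size
 *	| STORM int tables / PRAM | fw_version |
 *	+--------------------------------------+
 *
 * bnx2x_check_firmware() only validates bounds and the 4-byte version;
 * the section contents are consumed later by the init code.
 */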

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
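/*
 * Worked example (byte values made up): the raw 8-byte record
 *	0x02 0x01 0x10 0x40  0x00 0x00 0x00 0x05
 * gives tmp = 0x02011040, so op = 0x02, offset = 0x011040 and
 * raw_data = 0x00000005 after the be32_to_cpu() swaps above.
 */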

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
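/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands (printk elided) to roughly:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * i.e. allocate a section and byte-swap it into host order.
 */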

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
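	/* e.g. "bnx2x-e1h-<maj>.<min>.<rev>.<eng>.fw"; the four numbers
	 * come from the BCM_5710_FW_* macros, so the exact file name is
	 * fixed at build time (pattern shown for illustration only).
	 */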

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev is zeroed in alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
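/*
 * Recovery flow driven by the PCI error core through the table above
 * (a summary of the callbacks, not additional driver code):
 *
 *	error detected  -> bnx2x_io_error_detected()  detach + unload,
 *	                   returns PCI_ERS_RESULT_NEED_RESET
 *	slot reset done -> bnx2x_io_slot_reset()      re-enable + restore
 *	traffic allowed -> bnx2x_io_resume()          recover MCP state,
 *	                   reload NIC, reattach netdev
 */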

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);