/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

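/*
 * The function above shows the DMAE handshake used throughout the driver:
 * build a dmae_command descriptor, zero the write-back completion word
 * (wb_comp), post the command via bnx2x_post_dmae() and poll until the
 * engine writes DMAE_COMP_VAL back into host memory.  The whole sequence
 * runs under dmae_mutex because the slowpath wb_comp/wb_data area is
 * shared by all callers.  bnx2x_read_dmae() below is the mirror image
 * (GRC -> PCI) of the same pattern.
 */
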
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

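/*
 * The chunking above works in 32-bit dwords: "len" counts dwords, so each
 * full chunk advances the byte offset by DMAE_LEN32_WR_MAX * 4 while
 * decrementing "len" by DMAE_LEN32_WR_MAX.  As an illustration, if
 * DMAE_LEN32_WR_MAX were 0x400, a 0x500-dword write would be issued as a
 * 0x400-dword transfer followed by a 0x100-dword transfer starting
 * 0x1000 bytes into the buffer.
 */
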
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
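
/*
 * The teardown ordering in bnx2x_int_disable_sync() matters: intr_sem is
 * bumped first so that an ISR which already slipped past the hardware
 * disable bails out early, synchronize_irq() then waits for handlers that
 * are still running, and only afterwards is the slowpath work cancelled
 * and the workqueue flushed so that no sp_task can be re-queued behind
 * our back.
 */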

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
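
/*
 * The return value of bnx2x_update_fpsb_idx() is a change bitmask: bit 0
 * is set when the CSTORM (Tx) status-block index moved, bit 1 when the
 * USTORM (Rx) index moved, so a return of 0 means the status block holds
 * no new work for this fastpath.
 */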

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
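
/*
 * BD accounting in bnx2x_free_tx_pkt(): a packet occupies a start BD, a
 * parse BD, an optional TSO split-header BD and then data BDs, but only
 * the start BD and the data BDs carry DMA mappings.  new_cons is derived
 * from the full nbd count before the skips, so the consumer advances over
 * every BD the packet consumed, while the unmap loop only touches the
 * BDs that were actually mapped.
 */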

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
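
/*
 * bnx2x_tx_avail() arithmetic: prod and cons are free-running u16
 * indices, so SUB_S16() yields their distance even across a 64K wrap
 * (e.g. prod = 5, cons = 0xfffb gives 10 used BDs).  The NUM_TX_RINGS
 * bias reserves the "next page" BDs, which can never hold data, before
 * the result is subtracted from tx_ring_size.
 */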

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
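
/*
 * The SGE producer bookkeeping above runs on a bitmask shadow of the
 * ring: bits are cleared as the firmware reports pages consumed, and the
 * producer only advances over 64-bit mask elements that have gone
 * completely to zero, re-arming RX_SGE_MASK_ELEM_SZ entries at a time.
 * Pages may thus complete out of order while the producer is still
 * published as a single monotonic index.
 */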

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
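
/*
 * TPA flow as implemented above: bnx2x_tpa_start() parks the
 * currently-aggregating skb in tpa_pool[queue] and hands the hardware a
 * fresh buffer, while bnx2x_tpa_stop() later completes the parked skb by
 * fixing up the IP checksum of the coalesced frame, attaching the SGE
 * pages via bnx2x_fill_frag_skb() and passing the result to the stack.
 * The bin is marked BNX2X_TPA_STOP on every exit path, so a failed
 * allocation costs only the one aggregated packet.
 */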

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
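
/*
 * bnx2x_rx_int() above is the NAPI poll body for one fastpath: it walks
 * the completion queue, routing slowpath CQEs to bnx2x_sp_event(), TPA
 * CQEs to the start/stop handlers and everything else through the plain
 * receive path, then publishes the bd/cqe/sge producers back to the
 * firmware in a single bnx2x_update_rx_prod() call.  bd_prod_fw keeps the
 * raw cyclic value handed to the firmware, while bd_prod is re-masked
 * with RX_BD() on each iteration for host-side ring indexing.
 */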

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
					 status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
					 status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
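
/*
 * Illustrative usage (editorial note; mirrors a call made further down in
 * this file by the fan-failure handler): driving the external PHY reset
 * pin low on a given port looks like
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *
 * Note that the port argument may be flipped internally when the NIG port
 * swap strap is active.
 */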

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
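
/*
 * Worked example (editorial note, not in the original source): on a 10G
 * link, line_speed = 10000 Mbps, so r_param = 10000 / 8 = 1250 bytes/usec
 * and, per the comment above, t_fair = T_FAIR_COEF / 10000 works out to
 * 1000 usec.  With RS_PERIODIC_TIMEOUT_USEC = 100 (25 SDM ticks), the
 * rate-shaping threshold becomes 100 * 1250 * 5 / 4 = 156250 bytes, i.e.
 * 1.25 times the traffic of one full period.
 */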

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set
   to DEF_MIN_RATE.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to DEF_MIN_RATE */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
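
/*
 * Worked example (editorial note): with two visible vns whose MIN_BW
 * fields are 20 and 0, the loop yields vn_min_rate values of 2000 and
 * DEF_MIN_RATE, so vn_weight_sum = 2000 + DEF_MIN_RATE and fairness stays
 * enabled; only the all-zero case clears CMNG_FLAGS_PER_PORT_FAIRNESS_VN.
 */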

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to DEF_MIN_RATE */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
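
/*
 * Worked example (editorial note): a vn with a MAX_BW field of 50 gets
 * vn_max_rate = 5000 Mbps.  Mbps is bits/usec, so with
 * RS_PERIODIC_TIMEOUT_USEC = 100 the per-period quota is
 * 5000 * 100 / 8 = 62500 bytes of transmit credit per rate-shaping period.
 */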

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
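
/*
 * Illustrative caller pattern (editorial note): the DCC handler further
 * down in this file reports completion with
 *
 *	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * The return value is the FW reply masked with FW_MSG_CODE_MASK, or 0 if
 * the MCP never echoed our sequence number back.
 */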

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_eth_mac_addr_e1h(bp, 0);

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_eth_mac_addr_e1h(bp, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
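
/*
 * Illustrative usage (editorial note): the statistics code later in this
 * file posts a ramrod through this interface as
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * A zero return means the SPE was queued and the producer was written to
 * the XSTORM internal memory.
 */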

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
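
/*
 * Editorial note: the return value is a bitmask of which default status
 * block indices changed since the last poll - bit 0 for the attention
 * bits, then bits 1..4 for the CSTORM, USTORM, XSTORM and TSTORM indices
 * respectively.  bnx2x_sp_task() below only inspects bit 0 (HW
 * attentions).
 */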

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
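
/*
 * Worked example (editorial note): for minuend 0x1_00000000 (m_hi = 1,
 * m_lo = 0) and subtrahend 1 (s_hi = 0, s_lo = 1), m_lo < s_lo triggers
 * the underflow path: one is "loaned" from the high word, giving
 * d_hi = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, i.e. the
 * correct 64-bit difference.  Counters that appear to run backwards
 * saturate to zero instead of underflowing.
 */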

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
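
/*
 * Editorial note: hiref points at the high 32-bit word of a {hi, lo}
 * counter pair, with the low word stored immediately after it.  On 64-bit
 * builds the pair is combined via HILO_U64(); on 32-bit builds only the
 * low word fits in a long, so the value simply truncates.
 */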

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
3446
bb2a0f7a
YG
3447static void bnx2x_hw_stats_post(struct bnx2x *bp)
3448{
3449 struct dmae_command *dmae = &bp->stats_dmae;
3450 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3451
3452 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3453 if (CHIP_REV_IS_SLOW(bp))
3454 return;
bb2a0f7a
YG
3455
3456 /* loader */
3457 if (bp->executer_idx) {
3458 int loader_idx = PMF_DMAE_C(bp);
3459
3460 memset(dmae, 0, sizeof(struct dmae_command));
3461
3462 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3463 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3464 DMAE_CMD_DST_RESET |
3465#ifdef __BIG_ENDIAN
3466 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3467#else
3468 DMAE_CMD_ENDIANITY_DW_SWAP |
3469#endif
3470 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3471 DMAE_CMD_PORT_0) |
3472 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3473 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3474 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3475 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3476 sizeof(struct dmae_command) *
3477 (loader_idx + 1)) >> 2;
3478 dmae->dst_addr_hi = 0;
3479 dmae->len = sizeof(struct dmae_command) >> 2;
3480 if (CHIP_IS_E1(bp))
3481 dmae->len--;
3482 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3483 dmae->comp_addr_hi = 0;
3484 dmae->comp_val = 1;
3485
3486 *stats_comp = 0;
3487 bnx2x_post_dmae(bp, dmae, loader_idx);
3488
3489 } else if (bp->func_stx) {
3490 *stats_comp = 0;
3491 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3492 }
3493}
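/* Illustration: this is the DMAE "loader" pattern.  When commands
 * have been queued in the slowpath area (executer_idx > 0), the
 * command built here copies a queued command into the DMAE command
 * memory and its completion value fires the next GO register, so a
 * single bnx2x_post_dmae() launches the whole chain without further
 * CPU involvement; the final command of a chain writes DMAE_COMP_VAL
 * to stats_comp, which bnx2x_stats_comp() polls for.
 */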
3494
3495static int bnx2x_stats_comp(struct bnx2x *bp)
3496{
3497 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3498 int cnt = 10;
3499
3500 might_sleep();
3501 while (*stats_comp != DMAE_COMP_VAL) {
 3502		if (!cnt) {
 3503			BNX2X_ERR("timeout waiting for stats to finish\n");
3504 break;
3505 }
3506 cnt--;
12469401 3507 msleep(1);
3508 }
3509 return 1;
3510}
3511
3512/*
3513 * Statistics service functions
3514 */
3515
3516static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3517{
3518 struct dmae_command *dmae;
3519 u32 opcode;
3520 int loader_idx = PMF_DMAE_C(bp);
3521 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3522
3523 /* sanity */
3524 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3525 BNX2X_ERR("BUG!\n");
3526 return;
3527 }
3528
3529 bp->executer_idx = 0;
3530
3531 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3532 DMAE_CMD_C_ENABLE |
3533 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3534#ifdef __BIG_ENDIAN
3535 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3536#else
3537 DMAE_CMD_ENDIANITY_DW_SWAP |
3538#endif
3539 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3540 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3541
3542 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3543 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3544 dmae->src_addr_lo = bp->port.port_stx >> 2;
3545 dmae->src_addr_hi = 0;
3546 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3547 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3548 dmae->len = DMAE_LEN32_RD_MAX;
3549 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3550 dmae->comp_addr_hi = 0;
3551 dmae->comp_val = 1;
3552
3553 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3554 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3555 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3556 dmae->src_addr_hi = 0;
3557 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3558 DMAE_LEN32_RD_MAX * 4);
3559 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3560 DMAE_LEN32_RD_MAX * 4);
3561 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3562 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3563 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3564 dmae->comp_val = DMAE_COMP_VAL;
3565
3566 *stats_comp = 0;
3567 bnx2x_hw_stats_post(bp);
3568 bnx2x_stats_comp(bp);
3569}
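/* Illustration: a single DMAE read is capped at DMAE_LEN32_RD_MAX
 * dwords, so host_port_stats is fetched in two commands: the first
 * DMAE_LEN32_RD_MAX dwords and then the remainder.  Note that the
 * GRC source offset advances by DMAE_LEN32_RD_MAX (dword units)
 * while the PCI destination advances by DMAE_LEN32_RD_MAX * 4
 * (bytes); only the second command signals stats_comp.
 */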
3570
3571static void bnx2x_port_stats_init(struct bnx2x *bp)
3572{
3573 struct dmae_command *dmae;
34f80b04 3574 int port = BP_PORT(bp);
bb2a0f7a 3575 int vn = BP_E1HVN(bp);
a2fbb9ea 3576 u32 opcode;
bb2a0f7a 3577 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3578 u32 mac_addr;
3579 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3580
3581 /* sanity */
3582 if (!bp->link_vars.link_up || !bp->port.pmf) {
3583 BNX2X_ERR("BUG!\n");
3584 return;
3585 }
3586
3587 bp->executer_idx = 0;
3588
3589 /* MCP */
3590 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3591 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3592 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3593#ifdef __BIG_ENDIAN
bb2a0f7a 3594 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3595#else
bb2a0f7a 3596 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3597#endif
3598 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3599 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3600
bb2a0f7a 3601 if (bp->port.port_stx) {
3602
3603 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3604 dmae->opcode = opcode;
3605 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3606 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3607 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3608 dmae->dst_addr_hi = 0;
3609 dmae->len = sizeof(struct host_port_stats) >> 2;
3610 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3611 dmae->comp_addr_hi = 0;
3612 dmae->comp_val = 1;
3613 }
3614
3615 if (bp->func_stx) {
3616
3617 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3618 dmae->opcode = opcode;
3619 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3620 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3621 dmae->dst_addr_lo = bp->func_stx >> 2;
3622 dmae->dst_addr_hi = 0;
3623 dmae->len = sizeof(struct host_func_stats) >> 2;
3624 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3625 dmae->comp_addr_hi = 0;
3626 dmae->comp_val = 1;
3627 }
3628
bb2a0f7a 3629 /* MAC */
3630 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3631 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3632 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3633#ifdef __BIG_ENDIAN
3634 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3635#else
3636 DMAE_CMD_ENDIANITY_DW_SWAP |
3637#endif
3638 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3639 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3640
c18487ee 3641 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3642
3643 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3644 NIG_REG_INGRESS_BMAC0_MEM);
3645
3646 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3647 BIGMAC_REGISTER_TX_STAT_GTBYT */
3648 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3649 dmae->opcode = opcode;
3650 dmae->src_addr_lo = (mac_addr +
3651 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3652 dmae->src_addr_hi = 0;
3653 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3654 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3655 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3656 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3657 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3658 dmae->comp_addr_hi = 0;
3659 dmae->comp_val = 1;
3660
3661 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3662 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3663 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3664 dmae->opcode = opcode;
3665 dmae->src_addr_lo = (mac_addr +
3666 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3667 dmae->src_addr_hi = 0;
3668 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3669 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3670 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3671 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3672 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3673 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3674 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675 dmae->comp_addr_hi = 0;
3676 dmae->comp_val = 1;
3677
c18487ee 3678 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3679
3680 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3681
3682 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3683 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3684 dmae->opcode = opcode;
3685 dmae->src_addr_lo = (mac_addr +
3686 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3687 dmae->src_addr_hi = 0;
3688 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3689 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3690 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3691 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3692 dmae->comp_addr_hi = 0;
3693 dmae->comp_val = 1;
3694
3695 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3696 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3697 dmae->opcode = opcode;
3698 dmae->src_addr_lo = (mac_addr +
3699 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3700 dmae->src_addr_hi = 0;
3701 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3702 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3703 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3704 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3705 dmae->len = 1;
3706 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3707 dmae->comp_addr_hi = 0;
3708 dmae->comp_val = 1;
3709
3710 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3711 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3712 dmae->opcode = opcode;
3713 dmae->src_addr_lo = (mac_addr +
3714 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3715 dmae->src_addr_hi = 0;
3716 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3717 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3718 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3719 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3720 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3721 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3722 dmae->comp_addr_hi = 0;
3723 dmae->comp_val = 1;
3724 }
3725
3726 /* NIG */
3727 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3728 dmae->opcode = opcode;
3729 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3730 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3731 dmae->src_addr_hi = 0;
3732 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3733 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3734 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3735 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3736 dmae->comp_addr_hi = 0;
3737 dmae->comp_val = 1;
3738
3739 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3740 dmae->opcode = opcode;
3741 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3742 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3743 dmae->src_addr_hi = 0;
3744 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3745 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3746 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3747 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3748 dmae->len = (2*sizeof(u32)) >> 2;
3749 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3750 dmae->comp_addr_hi = 0;
3751 dmae->comp_val = 1;
3752
3753 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3754 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3755 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3756 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3757#ifdef __BIG_ENDIAN
3758 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3759#else
3760 DMAE_CMD_ENDIANITY_DW_SWAP |
3761#endif
3762 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3763 (vn << DMAE_CMD_E1HVN_SHIFT));
3764 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3765 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3766 dmae->src_addr_hi = 0;
3767 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3768 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3769 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3770 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3771 dmae->len = (2*sizeof(u32)) >> 2;
3772 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3773 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3774 dmae->comp_val = DMAE_COMP_VAL;
3775
3776 *stats_comp = 0;
3777}
3778
bb2a0f7a 3779static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3780{
3781 struct dmae_command *dmae = &bp->stats_dmae;
3782 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3783
3784 /* sanity */
3785 if (!bp->func_stx) {
3786 BNX2X_ERR("BUG!\n");
3787 return;
3788 }
a2fbb9ea 3789
3790 bp->executer_idx = 0;
3791 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3792
3793 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3794 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3795 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3796#ifdef __BIG_ENDIAN
3797 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3798#else
3799 DMAE_CMD_ENDIANITY_DW_SWAP |
3800#endif
3801 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3802 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3803 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3804 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3805 dmae->dst_addr_lo = bp->func_stx >> 2;
3806 dmae->dst_addr_hi = 0;
3807 dmae->len = sizeof(struct host_func_stats) >> 2;
3808 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3809 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3810 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3811
3812 *stats_comp = 0;
3813}
a2fbb9ea 3814
3815static void bnx2x_stats_start(struct bnx2x *bp)
3816{
3817 if (bp->port.pmf)
3818 bnx2x_port_stats_init(bp);
3819
3820 else if (bp->func_stx)
3821 bnx2x_func_stats_init(bp);
3822
3823 bnx2x_hw_stats_post(bp);
3824 bnx2x_storm_stats_post(bp);
3825}
3826
3827static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3828{
3829 bnx2x_stats_comp(bp);
3830 bnx2x_stats_pmf_update(bp);
3831 bnx2x_stats_start(bp);
3832}
3833
3834static void bnx2x_stats_restart(struct bnx2x *bp)
3835{
3836 bnx2x_stats_comp(bp);
3837 bnx2x_stats_start(bp);
3838}
3839
3840static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3841{
3842 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3843 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3844 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3845 struct {
3846 u32 lo;
3847 u32 hi;
3848 } diff;
3849
3850 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3851 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3852 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3853 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3854 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3855 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3856 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3857 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3858 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3859 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3860 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3861 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3862 UPDATE_STAT64(tx_stat_gt127,
3863 tx_stat_etherstatspkts65octetsto127octets);
3864 UPDATE_STAT64(tx_stat_gt255,
3865 tx_stat_etherstatspkts128octetsto255octets);
3866 UPDATE_STAT64(tx_stat_gt511,
3867 tx_stat_etherstatspkts256octetsto511octets);
3868 UPDATE_STAT64(tx_stat_gt1023,
3869 tx_stat_etherstatspkts512octetsto1023octets);
3870 UPDATE_STAT64(tx_stat_gt1518,
3871 tx_stat_etherstatspkts1024octetsto1522octets);
3872 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3873 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3874 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3875 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3876 UPDATE_STAT64(tx_stat_gterr,
3877 tx_stat_dot3statsinternalmactransmiterrors);
3878 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3879
3880 estats->pause_frames_received_hi =
3881 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3882 estats->pause_frames_received_lo =
3883 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3884
3885 estats->pause_frames_sent_hi =
3886 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3887 estats->pause_frames_sent_lo =
3888 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3889}
3890
3891static void bnx2x_emac_stats_update(struct bnx2x *bp)
3892{
3893 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3894 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3895 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896
3897 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3898 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3899 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3900 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3901 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3902 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3903 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3904 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3905 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3906 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3907 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3908 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3909 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3910 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3911 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3912 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3913 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3914 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3915 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3916 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3917 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3918 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3919 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3920 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3921 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3922 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3923 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3924 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3925 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3926 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3927 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3928
3929 estats->pause_frames_received_hi =
3930 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3931 estats->pause_frames_received_lo =
3932 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3933 ADD_64(estats->pause_frames_received_hi,
3934 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3935 estats->pause_frames_received_lo,
3936 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3937
3938 estats->pause_frames_sent_hi =
3939 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3940 estats->pause_frames_sent_lo =
3941 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3942 ADD_64(estats->pause_frames_sent_hi,
3943 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3944 estats->pause_frames_sent_lo,
3945 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3946}
3947
3948static int bnx2x_hw_stats_update(struct bnx2x *bp)
3949{
3950 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3951 struct nig_stats *old = &(bp->port.old_nig_stats);
3952 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3953 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3954 struct {
3955 u32 lo;
3956 u32 hi;
3957 } diff;
de832a55 3958 u32 nig_timer_max;
3959
3960 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3961 bnx2x_bmac_stats_update(bp);
3962
3963 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3964 bnx2x_emac_stats_update(bp);
3965
3966 else { /* unreached */
c3eefaf6 3967 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3968 return -1;
3969 }
a2fbb9ea 3970
3971 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3972 new->brb_discard - old->brb_discard);
3973 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3974 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3975
3976 UPDATE_STAT64_NIG(egress_mac_pkt0,
3977 etherstatspkts1024octetsto1522octets);
3978 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3979
bb2a0f7a 3980 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3981
3982 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3983 sizeof(struct mac_stx));
3984 estats->brb_drop_hi = pstats->brb_drop_hi;
3985 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3986
bb2a0f7a 3987 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3988
3989 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3990 if (nig_timer_max != estats->nig_timer_max) {
3991 estats->nig_timer_max = nig_timer_max;
3992 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3993 }
3994
bb2a0f7a 3995 return 0;
3996}
3997
bb2a0f7a 3998static int bnx2x_storm_stats_update(struct bnx2x *bp)
3999{
4000 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4001 struct tstorm_per_port_stats *tport =
de832a55 4002 &stats->tstorm_common.port_statistics;
4003 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4004 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4005 int i;
4006
4007 memcpy(&(fstats->total_bytes_received_hi),
4008 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4009 sizeof(struct host_func_stats) - 2*sizeof(u32));
4010 estats->error_bytes_received_hi = 0;
4011 estats->error_bytes_received_lo = 0;
4012 estats->etherstatsoverrsizepkts_hi = 0;
4013 estats->etherstatsoverrsizepkts_lo = 0;
4014 estats->no_buff_discard_hi = 0;
4015 estats->no_buff_discard_lo = 0;
a2fbb9ea 4016
ca00392c 4017 for_each_rx_queue(bp, i) {
4018 struct bnx2x_fastpath *fp = &bp->fp[i];
4019 int cl_id = fp->cl_id;
4020 struct tstorm_per_client_stats *tclient =
4021 &stats->tstorm_common.client_statistics[cl_id];
4022 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4023 struct ustorm_per_client_stats *uclient =
4024 &stats->ustorm_common.client_statistics[cl_id];
4025 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4026 struct xstorm_per_client_stats *xclient =
4027 &stats->xstorm_common.client_statistics[cl_id];
4028 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4029 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4030 u32 diff;
4031
4032 /* are storm stats valid? */
4033 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4034 bp->stats_counter) {
4035 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4036 " xstorm counter (%d) != stats_counter (%d)\n",
4037 i, xclient->stats_counter, bp->stats_counter);
4038 return -1;
4039 }
4040 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4041 bp->stats_counter) {
4042 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4043 " tstorm counter (%d) != stats_counter (%d)\n",
4044 i, tclient->stats_counter, bp->stats_counter);
4045 return -2;
4046 }
4047 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4048 bp->stats_counter) {
4049 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4050 " ustorm counter (%d) != stats_counter (%d)\n",
4051 i, uclient->stats_counter, bp->stats_counter);
4052 return -4;
4053 }
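/* Illustration: the (u16)(le16_to_cpu(...) + 1) != bp->stats_counter
 * checks above are wraparound-safe.  Each storm echoes the
 * drv_counter of the query it last served, and bp->stats_counter was
 * post-incremented when the query was posted, so a reply is current
 * exactly when echoed + 1 == bp->stats_counter modulo 2^16 (echoed
 * 0xffff with counter 0x0000 still matches).
 */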
a2fbb9ea 4054
de832a55 4055 qstats->total_bytes_received_hi =
ca00392c 4056 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4057 qstats->total_bytes_received_lo =
4058 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4059
4060 ADD_64(qstats->total_bytes_received_hi,
4061 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4062 qstats->total_bytes_received_lo,
4063 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4064
4065 ADD_64(qstats->total_bytes_received_hi,
4066 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4067 qstats->total_bytes_received_lo,
4068 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4069
4070 qstats->valid_bytes_received_hi =
4071 qstats->total_bytes_received_hi;
de832a55 4072 qstats->valid_bytes_received_lo =
ca00392c 4073 qstats->total_bytes_received_lo;
bb2a0f7a 4074
de832a55 4075 qstats->error_bytes_received_hi =
bb2a0f7a 4076 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4077 qstats->error_bytes_received_lo =
bb2a0f7a 4078 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4079
4080 ADD_64(qstats->total_bytes_received_hi,
4081 qstats->error_bytes_received_hi,
4082 qstats->total_bytes_received_lo,
4083 qstats->error_bytes_received_lo);
4084
4085 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4086 total_unicast_packets_received);
4087 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4088 total_multicast_packets_received);
4089 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4090 total_broadcast_packets_received);
4091 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4092 etherstatsoverrsizepkts);
4093 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4094
4095 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4096 total_unicast_packets_received);
4097 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4098 total_multicast_packets_received);
4099 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4100 total_broadcast_packets_received);
4101 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4102 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4103 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4104
4105 qstats->total_bytes_transmitted_hi =
ca00392c 4106 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4107 qstats->total_bytes_transmitted_lo =
4108 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4109
4110 ADD_64(qstats->total_bytes_transmitted_hi,
4111 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4112 qstats->total_bytes_transmitted_lo,
4113 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4114
4115 ADD_64(qstats->total_bytes_transmitted_hi,
4116 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4117 qstats->total_bytes_transmitted_lo,
4118 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4119
4120 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4121 total_unicast_packets_transmitted);
4122 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4123 total_multicast_packets_transmitted);
4124 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4125 total_broadcast_packets_transmitted);
4126
4127 old_tclient->checksum_discard = tclient->checksum_discard;
4128 old_tclient->ttl0_discard = tclient->ttl0_discard;
4129
4130 ADD_64(fstats->total_bytes_received_hi,
4131 qstats->total_bytes_received_hi,
4132 fstats->total_bytes_received_lo,
4133 qstats->total_bytes_received_lo);
4134 ADD_64(fstats->total_bytes_transmitted_hi,
4135 qstats->total_bytes_transmitted_hi,
4136 fstats->total_bytes_transmitted_lo,
4137 qstats->total_bytes_transmitted_lo);
4138 ADD_64(fstats->total_unicast_packets_received_hi,
4139 qstats->total_unicast_packets_received_hi,
4140 fstats->total_unicast_packets_received_lo,
4141 qstats->total_unicast_packets_received_lo);
4142 ADD_64(fstats->total_multicast_packets_received_hi,
4143 qstats->total_multicast_packets_received_hi,
4144 fstats->total_multicast_packets_received_lo,
4145 qstats->total_multicast_packets_received_lo);
4146 ADD_64(fstats->total_broadcast_packets_received_hi,
4147 qstats->total_broadcast_packets_received_hi,
4148 fstats->total_broadcast_packets_received_lo,
4149 qstats->total_broadcast_packets_received_lo);
4150 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4151 qstats->total_unicast_packets_transmitted_hi,
4152 fstats->total_unicast_packets_transmitted_lo,
4153 qstats->total_unicast_packets_transmitted_lo);
4154 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4155 qstats->total_multicast_packets_transmitted_hi,
4156 fstats->total_multicast_packets_transmitted_lo,
4157 qstats->total_multicast_packets_transmitted_lo);
4158 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4159 qstats->total_broadcast_packets_transmitted_hi,
4160 fstats->total_broadcast_packets_transmitted_lo,
4161 qstats->total_broadcast_packets_transmitted_lo);
4162 ADD_64(fstats->valid_bytes_received_hi,
4163 qstats->valid_bytes_received_hi,
4164 fstats->valid_bytes_received_lo,
4165 qstats->valid_bytes_received_lo);
4166
4167 ADD_64(estats->error_bytes_received_hi,
4168 qstats->error_bytes_received_hi,
4169 estats->error_bytes_received_lo,
4170 qstats->error_bytes_received_lo);
4171 ADD_64(estats->etherstatsoverrsizepkts_hi,
4172 qstats->etherstatsoverrsizepkts_hi,
4173 estats->etherstatsoverrsizepkts_lo,
4174 qstats->etherstatsoverrsizepkts_lo);
4175 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4176 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4177 }
4178
4179 ADD_64(fstats->total_bytes_received_hi,
4180 estats->rx_stat_ifhcinbadoctets_hi,
4181 fstats->total_bytes_received_lo,
4182 estats->rx_stat_ifhcinbadoctets_lo);
4183
4184 memcpy(estats, &(fstats->total_bytes_received_hi),
4185 sizeof(struct host_func_stats) - 2*sizeof(u32));
4186
4187 ADD_64(estats->etherstatsoverrsizepkts_hi,
4188 estats->rx_stat_dot3statsframestoolong_hi,
4189 estats->etherstatsoverrsizepkts_lo,
4190 estats->rx_stat_dot3statsframestoolong_lo);
4191 ADD_64(estats->error_bytes_received_hi,
4192 estats->rx_stat_ifhcinbadoctets_hi,
4193 estats->error_bytes_received_lo,
4194 estats->rx_stat_ifhcinbadoctets_lo);
4195
4196 if (bp->port.pmf) {
4197 estats->mac_filter_discard =
4198 le32_to_cpu(tport->mac_filter_discard);
4199 estats->xxoverflow_discard =
4200 le32_to_cpu(tport->xxoverflow_discard);
4201 estats->brb_truncate_discard =
bb2a0f7a 4202 le32_to_cpu(tport->brb_truncate_discard);
4203 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4204 }
4205
4206 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4207
4208 bp->stats_pending = 0;
4209
4210 return 0;
4211}
4212
bb2a0f7a 4213static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4214{
bb2a0f7a 4215 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4216 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4217 int i;
4218
4219 nstats->rx_packets =
4220 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4221 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4222 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4223
4224 nstats->tx_packets =
4225 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4226 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4227 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4228
de832a55 4229 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4230
0e39e645 4231 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4232
de832a55 4233 nstats->rx_dropped = estats->mac_discard;
ca00392c 4234 for_each_rx_queue(bp, i)
4235 nstats->rx_dropped +=
4236 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4237
4238 nstats->tx_dropped = 0;
4239
4240 nstats->multicast =
de832a55 4241 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4242
bb2a0f7a 4243 nstats->collisions =
de832a55 4244 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4245
4246 nstats->rx_length_errors =
4247 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4248 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4249 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4250 bnx2x_hilo(&estats->brb_truncate_hi);
4251 nstats->rx_crc_errors =
4252 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4253 nstats->rx_frame_errors =
4254 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4255 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4256 nstats->rx_missed_errors = estats->xxoverflow_discard;
4257
4258 nstats->rx_errors = nstats->rx_length_errors +
4259 nstats->rx_over_errors +
4260 nstats->rx_crc_errors +
4261 nstats->rx_frame_errors +
4262 nstats->rx_fifo_errors +
4263 nstats->rx_missed_errors;
a2fbb9ea 4264
bb2a0f7a 4265 nstats->tx_aborted_errors =
4266 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4267 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4268 nstats->tx_carrier_errors =
4269 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4270 nstats->tx_fifo_errors = 0;
4271 nstats->tx_heartbeat_errors = 0;
4272 nstats->tx_window_errors = 0;
4273
4274 nstats->tx_errors = nstats->tx_aborted_errors +
4275 nstats->tx_carrier_errors +
4276 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4277}
4278
4279static void bnx2x_drv_stats_update(struct bnx2x *bp)
4280{
4281 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4282 int i;
4283
4284 estats->driver_xoff = 0;
4285 estats->rx_err_discard_pkt = 0;
4286 estats->rx_skb_alloc_failed = 0;
4287 estats->hw_csum_err = 0;
ca00392c 4288 for_each_rx_queue(bp, i) {
4289 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4290
4291 estats->driver_xoff += qstats->driver_xoff;
4292 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4293 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4294 estats->hw_csum_err += qstats->hw_csum_err;
4295 }
4296}
4297
bb2a0f7a 4298static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4299{
bb2a0f7a 4300 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4301
4302 if (*stats_comp != DMAE_COMP_VAL)
4303 return;
4304
4305 if (bp->port.pmf)
de832a55 4306 bnx2x_hw_stats_update(bp);
a2fbb9ea 4307
 4308	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
 4309		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4310 bnx2x_panic();
4311 return;
4312 }
4313
4314 bnx2x_net_stats_update(bp);
4315 bnx2x_drv_stats_update(bp);
4316
a2fbb9ea 4317 if (bp->msglevel & NETIF_MSG_TIMER) {
4318 struct bnx2x_fastpath *fp0_rx = bp->fp;
4319 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4320 struct tstorm_per_client_stats *old_tclient =
4321 &bp->fp->old_tclient;
4322 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4323 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4324 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4325 int i;
4326
4327 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4328 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4329 " tx pkt (%lx)\n",
4330 bnx2x_tx_avail(fp0_tx),
4331 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4332 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4333 " rx pkt (%lx)\n",
4334 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4335 fp0_rx->rx_comp_cons),
4336 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4337 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4338 "brb truncate %u\n",
4339 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4340 qstats->driver_xoff,
4341 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4342 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4343 "packets_too_big_discard %lu no_buff_discard %lu "
 4344	       "mac_discard %u mac_filter_discard %u "
 4345	       "xxoverflow_discard %u brb_truncate_discard %u "
4346 "ttl0_discard %u\n",
4781bfad 4347 le32_to_cpu(old_tclient->checksum_discard),
4348 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4349 bnx2x_hilo(&qstats->no_buff_discard_hi),
4350 estats->mac_discard, estats->mac_filter_discard,
4351 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4352 le32_to_cpu(old_tclient->ttl0_discard));
4353
4354 for_each_queue(bp, i) {
4355 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4356 bnx2x_fp(bp, i, tx_pkt),
4357 bnx2x_fp(bp, i, rx_pkt),
4358 bnx2x_fp(bp, i, rx_calls));
4359 }
4360 }
4361
4362 bnx2x_hw_stats_post(bp);
4363 bnx2x_storm_stats_post(bp);
4364}
a2fbb9ea 4365
4366static void bnx2x_port_stats_stop(struct bnx2x *bp)
4367{
4368 struct dmae_command *dmae;
4369 u32 opcode;
4370 int loader_idx = PMF_DMAE_C(bp);
4371 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4372
bb2a0f7a 4373 bp->executer_idx = 0;
a2fbb9ea 4374
4375 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4376 DMAE_CMD_C_ENABLE |
4377 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4378#ifdef __BIG_ENDIAN
bb2a0f7a 4379 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4380#else
bb2a0f7a 4381 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4382#endif
4383 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4384 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4385
4386 if (bp->port.port_stx) {
4387
4388 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4389 if (bp->func_stx)
4390 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4391 else
4392 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4393 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4394 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4395 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4396 dmae->dst_addr_hi = 0;
4397 dmae->len = sizeof(struct host_port_stats) >> 2;
4398 if (bp->func_stx) {
4399 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4400 dmae->comp_addr_hi = 0;
4401 dmae->comp_val = 1;
4402 } else {
4403 dmae->comp_addr_lo =
4404 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405 dmae->comp_addr_hi =
4406 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4407 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4408
4409 *stats_comp = 0;
4410 }
4411 }
4412
4413 if (bp->func_stx) {
4414
4415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4416 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4417 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4418 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4419 dmae->dst_addr_lo = bp->func_stx >> 2;
4420 dmae->dst_addr_hi = 0;
4421 dmae->len = sizeof(struct host_func_stats) >> 2;
4422 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4423 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4424 dmae->comp_val = DMAE_COMP_VAL;
4425
4426 *stats_comp = 0;
a2fbb9ea 4427 }
4428}
4429
4430static void bnx2x_stats_stop(struct bnx2x *bp)
4431{
4432 int update = 0;
4433
4434 bnx2x_stats_comp(bp);
4435
4436 if (bp->port.pmf)
4437 update = (bnx2x_hw_stats_update(bp) == 0);
4438
4439 update |= (bnx2x_storm_stats_update(bp) == 0);
4440
4441 if (update) {
4442 bnx2x_net_stats_update(bp);
a2fbb9ea 4443
4444 if (bp->port.pmf)
4445 bnx2x_port_stats_stop(bp);
4446
4447 bnx2x_hw_stats_post(bp);
4448 bnx2x_stats_comp(bp);
4449 }
4450}
4451
4452static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4453{
4454}
4455
4456static const struct {
4457 void (*action)(struct bnx2x *bp);
4458 enum bnx2x_stats_state next_state;
4459} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4460/* state event */
4461{
4462/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4463/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4464/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4465/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4466},
4467{
4468/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4469/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4470/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4471/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4472}
4473};
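/* Illustration: a trace through the table.  Starting in DISABLED, a
 * LINK_UP event runs bnx2x_stats_start() and moves to ENABLED;
 * subsequent UPDATE events run bnx2x_stats_update() and stay in
 * ENABLED; a STOP event runs bnx2x_stats_stop() and drops back to
 * DISABLED, where further UPDATE events hit bnx2x_stats_do_nothing().
 */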
4474
4475static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4476{
4477 enum bnx2x_stats_state state = bp->stats_state;
4478
4479 bnx2x_stats_stm[state][event].action(bp);
4480 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4481
4482 /* Make sure the state has been "changed" */
4483 smp_wmb();
4484
4485 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4486 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4487 state, event, bp->stats_state);
4488}
4489
4490static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4491{
4492 struct dmae_command *dmae;
4493 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4494
4495 /* sanity */
4496 if (!bp->port.pmf || !bp->port.port_stx) {
4497 BNX2X_ERR("BUG!\n");
4498 return;
4499 }
4500
4501 bp->executer_idx = 0;
4502
4503 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4504 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4505 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4506 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4507#ifdef __BIG_ENDIAN
4508 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4509#else
4510 DMAE_CMD_ENDIANITY_DW_SWAP |
4511#endif
4512 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4513 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4514 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4515 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4516 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4517 dmae->dst_addr_hi = 0;
4518 dmae->len = sizeof(struct host_port_stats) >> 2;
4519 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4520 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4521 dmae->comp_val = DMAE_COMP_VAL;
4522
4523 *stats_comp = 0;
4524 bnx2x_hw_stats_post(bp);
4525 bnx2x_stats_comp(bp);
4526}
4527
4528static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4529{
4530 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4531 int port = BP_PORT(bp);
4532 int func;
4533 u32 func_stx;
4534
4535 /* sanity */
4536 if (!bp->port.pmf || !bp->func_stx) {
4537 BNX2X_ERR("BUG!\n");
4538 return;
4539 }
4540
4541 /* save our func_stx */
4542 func_stx = bp->func_stx;
4543
4544 for (vn = VN_0; vn < vn_max; vn++) {
4545 func = 2*vn + port;
4546
4547 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4548 bnx2x_func_stats_init(bp);
4549 bnx2x_hw_stats_post(bp);
4550 bnx2x_stats_comp(bp);
4551 }
4552
4553 /* restore our func_stx */
4554 bp->func_stx = func_stx;
4555}
4556
4557static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4558{
4559 struct dmae_command *dmae = &bp->stats_dmae;
4560 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4561
4562 /* sanity */
4563 if (!bp->func_stx) {
4564 BNX2X_ERR("BUG!\n");
4565 return;
4566 }
4567
4568 bp->executer_idx = 0;
4569 memset(dmae, 0, sizeof(struct dmae_command));
4570
4571 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4572 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4573 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4574#ifdef __BIG_ENDIAN
4575 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4576#else
4577 DMAE_CMD_ENDIANITY_DW_SWAP |
4578#endif
4579 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4580 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4581 dmae->src_addr_lo = bp->func_stx >> 2;
4582 dmae->src_addr_hi = 0;
4583 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4584 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4585 dmae->len = sizeof(struct host_func_stats) >> 2;
4586 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4587 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4588 dmae->comp_val = DMAE_COMP_VAL;
4589
4590 *stats_comp = 0;
4591 bnx2x_hw_stats_post(bp);
4592 bnx2x_stats_comp(bp);
4593}
4594
4595static void bnx2x_stats_init(struct bnx2x *bp)
4596{
4597 int port = BP_PORT(bp);
4598 int func = BP_FUNC(bp);
4599 int i;
4600
4601 bp->stats_pending = 0;
4602 bp->executer_idx = 0;
4603 bp->stats_counter = 0;
4604
4605 /* port and func stats for management */
4606 if (!BP_NOMCP(bp)) {
4607 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4608 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4609
4610 } else {
4611 bp->port.port_stx = 0;
4612 bp->func_stx = 0;
4613 }
4614 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4615 bp->port.port_stx, bp->func_stx);
4616
4617 /* port stats */
4618 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4619 bp->port.old_nig_stats.brb_discard =
4620 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4621 bp->port.old_nig_stats.brb_truncate =
4622 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4623 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4624 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4625 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4626 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4627
4628 /* function stats */
4629 for_each_queue(bp, i) {
4630 struct bnx2x_fastpath *fp = &bp->fp[i];
4631
4632 memset(&fp->old_tclient, 0,
4633 sizeof(struct tstorm_per_client_stats));
4634 memset(&fp->old_uclient, 0,
4635 sizeof(struct ustorm_per_client_stats));
4636 memset(&fp->old_xclient, 0,
4637 sizeof(struct xstorm_per_client_stats));
4638 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4639 }
4640
4641 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4642 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4643
4644 bp->stats_state = STATS_STATE_DISABLED;
4645
4646 if (bp->port.pmf) {
4647 if (bp->port.port_stx)
4648 bnx2x_port_stats_base_init(bp);
4649
4650 if (bp->func_stx)
4651 bnx2x_func_stats_base_init(bp);
4652
4653 } else if (bp->func_stx)
4654 bnx2x_func_stats_base_update(bp);
4655}
4656
4657static void bnx2x_timer(unsigned long data)
4658{
4659 struct bnx2x *bp = (struct bnx2x *) data;
4660
4661 if (!netif_running(bp->dev))
4662 return;
4663
4664 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4665 goto timer_restart;
4666
4667 if (poll) {
4668 struct bnx2x_fastpath *fp = &bp->fp[0];
4669 int rc;
4670
7961f791 4671 bnx2x_tx_int(fp);
4672 rc = bnx2x_rx_int(fp, 1000);
4673 }
4674
4675 if (!BP_NOMCP(bp)) {
4676 int func = BP_FUNC(bp);
4677 u32 drv_pulse;
4678 u32 mcp_pulse;
4679
4680 ++bp->fw_drv_pulse_wr_seq;
4681 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4682 /* TBD - add SYSTEM_TIME */
4683 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4684 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4685
34f80b04 4686 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4687 MCP_PULSE_SEQ_MASK);
4688 /* The delta between driver pulse and mcp response
4689 * should be 1 (before mcp response) or 0 (after mcp response)
4690 */
4691 if ((drv_pulse != mcp_pulse) &&
4692 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4693 /* someone lost a heartbeat... */
4694 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4695 drv_pulse, mcp_pulse);
4696 }
4697 }
4698
f34d28ea 4699 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 4700 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4701
f1410647 4702timer_restart:
4703 mod_timer(&bp->timer, jiffies + bp->current_interval);
4704}
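/* Illustration: the pulse check above with concrete numbers.  If the
 * driver just wrote drv_pulse = 0x0005, a healthy MCP reports
 * mcp_pulse 0x0005 (already acknowledged) or 0x0004 (ack still
 * pending, i.e. drv_pulse == (mcp_pulse + 1) & MCP_PULSE_SEQ_MASK);
 * any other combination means a missed heartbeat and is logged.
 */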
4705
4706/* end of Statistics */
4707
4708/* nic init */
4709
4710/*
4711 * nic init service functions
4712 */
4713
34f80b04 4714static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4715{
4716 int port = BP_PORT(bp);
4717
4718 /* "CSTORM" */
4719 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4720 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4721 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4722 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4723 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4724 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4725}
4726
4727static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4728 dma_addr_t mapping, int sb_id)
4729{
4730 int port = BP_PORT(bp);
bb2a0f7a 4731 int func = BP_FUNC(bp);
a2fbb9ea 4732 int index;
34f80b04 4733 u64 section;
4734
4735 /* USTORM */
4736 section = ((u64)mapping) + offsetof(struct host_status_block,
4737 u_status_block);
34f80b04 4738 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4739
4740 REG_WR(bp, BAR_CSTRORM_INTMEM +
4741 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4742 REG_WR(bp, BAR_CSTRORM_INTMEM +
4743 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4744 U64_HI(section));
4745 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4746 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4747
4748 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4749 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4751
4752 /* CSTORM */
4753 section = ((u64)mapping) + offsetof(struct host_status_block,
4754 c_status_block);
34f80b04 4755 sb->c_status_block.status_block_id = sb_id;
4756
4757 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4758 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4759 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4760 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4761 U64_HI(section));
7a9b2557 4762 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4763 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4764
4765 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4766 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4767 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4768
4769 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4770}
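/* Illustration: each fastpath status block has a USTORM and a CSTORM
 * section; the code above programs the host DMA address of each
 * section into CSTORM internal memory (low dword, then high dword at
 * +4) and starts every HC index with its HC_DISABLE word set to 1,
 * leaving it to bnx2x_update_coalesce() to enable the indices that
 * are actually used.
 */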
4771
4772static void bnx2x_zero_def_sb(struct bnx2x *bp)
4773{
4774 int func = BP_FUNC(bp);
a2fbb9ea 4775
ca00392c 4776 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4777 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4778 sizeof(struct tstorm_def_status_block)/4);
4779 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4780 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4781 sizeof(struct cstorm_def_status_block_u)/4);
4782 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4783 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4784 sizeof(struct cstorm_def_status_block_c)/4);
4785 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4786 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4787 sizeof(struct xstorm_def_status_block)/4);
4788}
4789
4790static void bnx2x_init_def_sb(struct bnx2x *bp,
4791 struct host_def_status_block *def_sb,
34f80b04 4792 dma_addr_t mapping, int sb_id)
a2fbb9ea 4793{
4794 int port = BP_PORT(bp);
4795 int func = BP_FUNC(bp);
4796 int index, val, reg_offset;
4797 u64 section;
4798
4799 /* ATTN */
4800 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4801 atten_status_block);
34f80b04 4802 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4803
4804 bp->attn_state = 0;
4805
4806 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4807 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4808
34f80b04 4809 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4810 bp->attn_group[index].sig[0] = REG_RD(bp,
4811 reg_offset + 0x10*index);
4812 bp->attn_group[index].sig[1] = REG_RD(bp,
4813 reg_offset + 0x4 + 0x10*index);
4814 bp->attn_group[index].sig[2] = REG_RD(bp,
4815 reg_offset + 0x8 + 0x10*index);
4816 bp->attn_group[index].sig[3] = REG_RD(bp,
4817 reg_offset + 0xc + 0x10*index);
4818 }
4819
4820 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4821 HC_REG_ATTN_MSG0_ADDR_L);
4822
4823 REG_WR(bp, reg_offset, U64_LO(section));
4824 REG_WR(bp, reg_offset + 4, U64_HI(section));
4825
4826 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4827
4828 val = REG_RD(bp, reg_offset);
34f80b04 4829 val |= sb_id;
4830 REG_WR(bp, reg_offset, val);
4831
4832 /* USTORM */
4833 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834 u_def_status_block);
34f80b04 4835 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4836
4837 REG_WR(bp, BAR_CSTRORM_INTMEM +
4838 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4839 REG_WR(bp, BAR_CSTRORM_INTMEM +
4840 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4841 U64_HI(section));
4842 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4843 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4844
4845 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4846 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4848
4849 /* CSTORM */
4850 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851 c_def_status_block);
34f80b04 4852 def_sb->c_def_status_block.status_block_id = sb_id;
4853
4854 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4855 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4856 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4857 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4858 U64_HI(section));
5c862848 4859 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4860 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4861
4862 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4863 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4864 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4865
4866 /* TSTORM */
4867 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868 t_def_status_block);
34f80b04 4869 def_sb->t_def_status_block.status_block_id = sb_id;
4870
4871 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4872 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4873 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4874 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4875 U64_HI(section));
5c862848 4876 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4877 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878
4879 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4880 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4881 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4882
4883 /* XSTORM */
4884 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4885 x_def_status_block);
34f80b04 4886 def_sb->x_def_status_block.status_block_id = sb_id;
4887
4888 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4889 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4890 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4891 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4892 U64_HI(section));
5c862848 4893 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4894 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4895
4896 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4897 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4898 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4899
bb2a0f7a 4900 bp->stats_pending = 0;
66e855f3 4901 bp->set_mac_pending = 0;
bb2a0f7a 4902
34f80b04 4903 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4904}
4905
4906static void bnx2x_update_coalesce(struct bnx2x *bp)
4907{
34f80b04 4908 int port = BP_PORT(bp);
4909 int i;
4910
4911 for_each_queue(bp, i) {
34f80b04 4912 int sb_id = bp->fp[i].sb_id;
4913
4914 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4915 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4916 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4917 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4918 bp->rx_ticks/12);
4919 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4920 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4921 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4922 (bp->rx_ticks/12) ? 0 : 1);
4923
4924 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4925 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4926 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4927 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4928 bp->tx_ticks/12);
a2fbb9ea 4929 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4930 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4931 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4932 (bp->tx_ticks/12) ? 0 : 1);
4933 }
4934}
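/* Illustration: assuming bp->rx_ticks/bp->tx_ticks hold the ethtool
 * coalescing settings in microseconds, the /12 above converts them
 * into the status-block timeout units of 12us per tick.  A setting
 * below 12us rounds down to 0, in which case the HC_DISABLE word is
 * set instead and interrupt coalescing is turned off for that index.
 */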
4935
4936static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4937 struct bnx2x_fastpath *fp, int last)
4938{
4939 int i;
4940
4941 for (i = 0; i < last; i++) {
4942 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4943 struct sk_buff *skb = rx_buf->skb;
4944
4945 if (skb == NULL) {
4946 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4947 continue;
4948 }
4949
4950 if (fp->tpa_state[i] == BNX2X_TPA_START)
4951 pci_unmap_single(bp->pdev,
4952 pci_unmap_addr(rx_buf, mapping),
356e2385 4953 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4954
4955 dev_kfree_skb(skb);
4956 rx_buf->skb = NULL;
4957 }
4958}
4959
a2fbb9ea
ET
4960static void bnx2x_init_rx_rings(struct bnx2x *bp)
4961{
7a9b2557 4962 int func = BP_FUNC(bp);
4963 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4964 ETH_MAX_AGGREGATION_QUEUES_E1H;
4965 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4966 int i, j;
a2fbb9ea 4967
87942b46 4968 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4969 DP(NETIF_MSG_IFUP,
4970 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4971
7a9b2557 4972 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4973
555f6c78 4974 for_each_rx_queue(bp, j) {
32626230 4975 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4976
32626230 4977 for (i = 0; i < max_agg_queues; i++) {
4978 fp->tpa_pool[i].skb =
4979 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4980 if (!fp->tpa_pool[i].skb) {
4981 BNX2X_ERR("Failed to allocate TPA "
4982 "skb pool for queue[%d] - "
4983 "disabling TPA on this "
4984 "queue!\n", j);
4985 bnx2x_free_tpa_pool(bp, fp, i);
4986 fp->disable_tpa = 1;
4987 break;
4988 }
4989 pci_unmap_addr_set((struct sw_rx_bd *)
 4990 				   &fp->tpa_pool[i],
4991 mapping, 0);
4992 fp->tpa_state[i] = BNX2X_TPA_STOP;
4993 }
4994 }
4995 }
4996
555f6c78 4997 for_each_rx_queue(bp, j) {
4998 struct bnx2x_fastpath *fp = &bp->fp[j];
4999
5000 fp->rx_bd_cons = 0;
5001 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5002 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5003
5004 /* Mark queue as Rx */
5005 fp->is_rx_queue = 1;
5006
5007 /* "next page" elements initialization */
5008 /* SGE ring */
5009 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5010 struct eth_rx_sge *sge;
5011
5012 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5013 sge->addr_hi =
5014 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5015 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5016 sge->addr_lo =
5017 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5018 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5019 }
5020
5021 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5022
7a9b2557 5023 /* RX BD ring */
5024 for (i = 1; i <= NUM_RX_RINGS; i++) {
5025 struct eth_rx_bd *rx_bd;
5026
5027 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5028 rx_bd->addr_hi =
5029 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5030 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5031 rx_bd->addr_lo =
5032 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5033 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5034 }
5035
34f80b04 5036 /* CQ ring */
5037 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5038 struct eth_rx_cqe_next_page *nextpg;
5039
5040 nextpg = (struct eth_rx_cqe_next_page *)
5041 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5042 nextpg->addr_hi =
5043 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5044 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5045 nextpg->addr_lo =
5046 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5047 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5048 }
5049
5050 /* Allocate SGEs and initialize the ring elements */
5051 for (i = 0, ring_prod = 0;
5052 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5053
5054 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5055 BNX2X_ERR("was only able to allocate "
5056 "%d rx sges\n", i);
5057 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5058 /* Cleanup already allocated elements */
5059 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5060 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5061 fp->disable_tpa = 1;
5062 ring_prod = 0;
5063 break;
5064 }
5065 ring_prod = NEXT_SGE_IDX(ring_prod);
5066 }
5067 fp->rx_sge_prod = ring_prod;
5068
5069 /* Allocate BDs and initialize BD ring */
66e855f3 5070 fp->rx_comp_cons = 0;
7a9b2557 5071 cqe_ring_prod = ring_prod = 0;
5072 for (i = 0; i < bp->rx_ring_size; i++) {
5073 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5074 BNX2X_ERR("was only able to allocate "
5075 "%d rx skbs on queue[%d]\n", i, j);
5076 fp->eth_q_stats.rx_skb_alloc_failed++;
5077 break;
5078 }
5079 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5080 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5081 WARN_ON(ring_prod <= i);
5082 }
5083
5084 fp->rx_bd_prod = ring_prod;
5085 /* must not have more available CQEs than BDs */
5086 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5087 cqe_ring_prod);
5088 fp->rx_pkt = fp->rx_calls = 0;
5089
5090 /* Warning!
 5091 		 * this will generate an interrupt (to the TSTORM);
 5092 		 * it must only be done after the chip is initialized
5093 */
5094 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5095 fp->rx_sge_prod);
5096 if (j != 0)
5097 continue;
5098
5099 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5100 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5101 U64_LO(fp->rx_comp_mapping));
5102 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5103 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5104 U64_HI(fp->rx_comp_mapping));
5105 }
5106}
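The three "next page" loops above share one pattern: each ring is a chain of BCM_PAGE_SIZE pages whose tail entries are reserved as a link to the following page instead of being handed to hardware as real descriptors (the last two entries of each Rx BD page, the last entry of each CQ page), and the modulo makes the final page link back to the first. A sketch of the link-address computation those loops inline:

/* illustrative only -- not driver code; npages is NUM_RX_RINGS,
 * NUM_RX_SGE_PAGES or NUM_RCQ_RINGS in the loops above */
static inline dma_addr_t ring_next_page_addr(dma_addr_t base, int i,
					     int npages)
{
	/* the link stored in page i-1 points at page (i % npages);
	 * for i == npages this wraps to page 0, closing the ring */
	return base + BCM_PAGE_SIZE * (i % npages);
}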
5107
5108static void bnx2x_init_tx_ring(struct bnx2x *bp)
5109{
5110 int i, j;
5111
555f6c78 5112 for_each_tx_queue(bp, j) {
5113 struct bnx2x_fastpath *fp = &bp->fp[j];
5114
5115 for (i = 1; i <= NUM_TX_RINGS; i++) {
5116 struct eth_tx_next_bd *tx_next_bd =
5117 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5118
ca00392c 5119 tx_next_bd->addr_hi =
a2fbb9ea 5120 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5121 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5122 tx_next_bd->addr_lo =
a2fbb9ea 5123 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5124 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5125 }
5126
5127 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5128 fp->tx_db.data.zero_fill1 = 0;
5129 fp->tx_db.data.prod = 0;
5130
5131 fp->tx_pkt_prod = 0;
5132 fp->tx_pkt_cons = 0;
5133 fp->tx_bd_prod = 0;
5134 fp->tx_bd_cons = 0;
5135 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5136 fp->tx_pkt = 0;
5137 }
5138
5139 /* clean tx statistics */
5140 for_each_rx_queue(bp, i)
5141 bnx2x_fp(bp, i, tx_pkt) = 0;
5142}
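/* Editor's note: Tx pages reserve only their final entry
 * (TX_DESC_CNT * i - 1) as the next-page link, versus the two reserved
 * at the end of each Rx BD page above; the doorbell header is also
 * pre-built here so the transmit fast path only has to refresh
 * tx_db.data.prod before ringing the doorbell. */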
5143
5144static void bnx2x_init_sp_ring(struct bnx2x *bp)
5145{
34f80b04 5146 int func = BP_FUNC(bp);
5147
5148 spin_lock_init(&bp->spq_lock);
5149
5150 bp->spq_left = MAX_SPQ_PENDING;
5151 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5152 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5153 bp->spq_prod_bd = bp->spq;
5154 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5155
34f80b04 5156 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5157 U64_LO(bp->spq_mapping));
5158 REG_WR(bp,
5159 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5160 U64_HI(bp->spq_mapping));
5161
34f80b04 5162 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5163 bp->spq_prod_idx);
5164}
5165
5166static void bnx2x_init_context(struct bnx2x *bp)
5167{
5168 int i;
5169
ca00392c 5170 for_each_rx_queue(bp, i) {
5171 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5172 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5173 u8 cl_id = fp->cl_id;
a2fbb9ea 5174
5175 context->ustorm_st_context.common.sb_index_numbers =
5176 BNX2X_RX_SB_INDEX_NUM;
0626b899 5177 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5178 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5179 context->ustorm_st_context.common.flags =
5180 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5181 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5182 context->ustorm_st_context.common.statistics_counter_id =
5183 cl_id;
8d9c5f34 5184 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5185 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5186 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5187 bp->rx_buf_size;
34f80b04 5188 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5189 U64_HI(fp->rx_desc_mapping);
34f80b04 5190 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5191 U64_LO(fp->rx_desc_mapping);
5192 if (!fp->disable_tpa) {
5193 context->ustorm_st_context.common.flags |=
ca00392c 5194 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5195 context->ustorm_st_context.common.sge_buff_size =
5196 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5197 (u32)0xffff);
5198 context->ustorm_st_context.common.sge_page_base_hi =
5199 U64_HI(fp->rx_sge_mapping);
5200 context->ustorm_st_context.common.sge_page_base_lo =
5201 U64_LO(fp->rx_sge_mapping);
5202
5203 context->ustorm_st_context.common.max_sges_for_packet =
5204 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5205 context->ustorm_st_context.common.max_sges_for_packet =
5206 ((context->ustorm_st_context.common.
5207 max_sges_for_packet + PAGES_PER_SGE - 1) &
5208 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5209 }
5210
5211 context->ustorm_ag_context.cdu_usage =
5212 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5213 CDU_REGION_NUMBER_UCM_AG,
5214 ETH_CONNECTION_TYPE);
5215
5216 context->xstorm_ag_context.cdu_reserved =
5217 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218 CDU_REGION_NUMBER_XCM_AG,
5219 ETH_CONNECTION_TYPE);
5220 }
5221
5222 for_each_tx_queue(bp, i) {
5223 struct bnx2x_fastpath *fp = &bp->fp[i];
5224 struct eth_context *context =
5225 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5226
5227 context->cstorm_st_context.sb_index_number =
5228 C_SB_ETH_TX_CQ_INDEX;
5229 context->cstorm_st_context.status_block_id = fp->sb_id;
5230
5231 context->xstorm_st_context.tx_bd_page_base_hi =
5232 U64_HI(fp->tx_desc_mapping);
5233 context->xstorm_st_context.tx_bd_page_base_lo =
5234 U64_LO(fp->tx_desc_mapping);
ca00392c 5235 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5236 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5237 }
5238}
5239
5240static void bnx2x_init_ind_table(struct bnx2x *bp)
5241{
26c8fa4d 5242 int func = BP_FUNC(bp);
5243 int i;
5244
555f6c78 5245 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5246 return;
5247
5248 DP(NETIF_MSG_IFUP,
5249 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5250 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5251 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5252 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5253 bp->fp->cl_id + (i % bp->num_rx_queues));
5254}
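/* Editor's worked example for the loop above, assuming 4 Rx queues and
 * a leading client id of 0: the TSTORM_INDIRECTION_TABLE_SIZE entries
 * are written 0,1,2,3,0,1,2,3,... so RSS hash bucket i lands on client
 * (cl_id + i % num_rx_queues), spreading flows round-robin across the
 * Rx clients. */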
5255
5256static void bnx2x_set_client_config(struct bnx2x *bp)
5257{
49d66772 5258 struct tstorm_eth_client_config tstorm_client = {0};
5259 int port = BP_PORT(bp);
5260 int i;
49d66772 5261
e7799c5f 5262 tstorm_client.mtu = bp->dev->mtu;
49d66772 5263 tstorm_client.config_flags =
5264 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5265 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5266#ifdef BCM_VLAN
0c6671b0 5267 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5268 tstorm_client.config_flags |=
8d9c5f34 5269 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5270 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5271 }
5272#endif
5273
5274 for_each_queue(bp, i) {
5275 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5276
49d66772 5277 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5278 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5279 ((u32 *)&tstorm_client)[0]);
5280 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5281 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5282 ((u32 *)&tstorm_client)[1]);
5283 }
5284
5285 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5286 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5287}
5288
5289static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5290{
a2fbb9ea 5291 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5292 int mode = bp->rx_mode;
37b091ba 5293 int mask = bp->rx_mode_cl_mask;
34f80b04 5294 int func = BP_FUNC(bp);
581ce43d 5295 int port = BP_PORT(bp);
a2fbb9ea 5296 int i;
5297 /* All but management unicast packets should pass to the host as well */
5298 u32 llh_mask =
5299 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5300 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5301 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5302 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5303
3196a88a 5304 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5305
5306 switch (mode) {
5307 case BNX2X_RX_MODE_NONE: /* no Rx */
5308 tstorm_mac_filter.ucast_drop_all = mask;
5309 tstorm_mac_filter.mcast_drop_all = mask;
5310 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5311 break;
356e2385 5312
a2fbb9ea 5313 case BNX2X_RX_MODE_NORMAL:
34f80b04 5314 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5315 break;
356e2385 5316
a2fbb9ea 5317 case BNX2X_RX_MODE_ALLMULTI:
5318 tstorm_mac_filter.mcast_accept_all = mask;
5319 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5320 break;
356e2385 5321
a2fbb9ea 5322 case BNX2X_RX_MODE_PROMISC:
5323 tstorm_mac_filter.ucast_accept_all = mask;
5324 tstorm_mac_filter.mcast_accept_all = mask;
5325 tstorm_mac_filter.bcast_accept_all = mask;
5326 /* pass management unicast packets as well */
5327 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5328 break;
356e2385 5329
a2fbb9ea 5330 default:
5331 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5332 break;
5333 }
5334
5335 REG_WR(bp,
5336 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5337 llh_mask);
5338
5339 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5340 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5341 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5342 ((u32 *)&tstorm_mac_filter)[i]);
5343
34f80b04 5344/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5345 ((u32 *)&tstorm_mac_filter)[i]); */
5346 }
a2fbb9ea 5347
5348 if (mode != BNX2X_RX_MODE_NONE)
5349 bnx2x_set_client_config(bp);
5350}
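/* Editor's summary of the tstorm filter masks written above:
 *   NONE     - ucast/mcast/bcast drop_all        (no Rx at all)
 *   NORMAL   - bcast accept_all only; ucast/mcast are presumably left
 *              to the configured MAC and multicast filters
 *   ALLMULTI - mcast + bcast accept_all
 *   PROMISC  - ucast + mcast + bcast accept_all, and the NIG LLH mask
 *              is widened so management unicast also reaches the host */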
5351
5352static void bnx2x_init_internal_common(struct bnx2x *bp)
5353{
5354 int i;
5355
5356 /* Zero this manually as its initialization is
5357 currently missing in the initTool */
5358 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5359 REG_WR(bp, BAR_USTRORM_INTMEM +
5360 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5361}
5362
5363static void bnx2x_init_internal_port(struct bnx2x *bp)
5364{
5365 int port = BP_PORT(bp);
5366
5367 REG_WR(bp,
5368 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5369 REG_WR(bp,
5370 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5371 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5372 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5373}
5374
5375static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5376{
5377 struct tstorm_eth_function_common_config tstorm_config = {0};
5378 struct stats_indication_flags stats_flags = {0};
5379 int port = BP_PORT(bp);
5380 int func = BP_FUNC(bp);
5381 int i, j;
5382 u32 offset;
471de716 5383 u16 max_agg_size;
5384
5385 if (is_multi(bp)) {
555f6c78 5386 tstorm_config.config_flags = MULTI_FLAGS(bp);
5387 tstorm_config.rss_result_mask = MULTI_MASK;
5388 }
5389
5390 /* Enable TPA if needed */
5391 if (bp->flags & TPA_ENABLE_FLAG)
5392 tstorm_config.config_flags |=
5393 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5394
5395 if (IS_E1HMF(bp))
5396 tstorm_config.config_flags |=
5397 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5398
5399 tstorm_config.leading_client_id = BP_L_ID(bp);
5400
a2fbb9ea 5401 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5402 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5403 (*(u32 *)&tstorm_config));
5404
c14423fe 5405 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5406 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5407 bnx2x_set_storm_rx_mode(bp);
5408
5409 for_each_queue(bp, i) {
5410 u8 cl_id = bp->fp[i].cl_id;
5411
5412 /* reset xstorm per client statistics */
5413 offset = BAR_XSTRORM_INTMEM +
5414 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5415 for (j = 0;
5416 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5417 REG_WR(bp, offset + j*4, 0);
5418
5419 /* reset tstorm per client statistics */
5420 offset = BAR_TSTRORM_INTMEM +
5421 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5422 for (j = 0;
5423 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5424 REG_WR(bp, offset + j*4, 0);
5425
5426 /* reset ustorm per client statistics */
5427 offset = BAR_USTRORM_INTMEM +
5428 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5429 for (j = 0;
5430 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5431 REG_WR(bp, offset + j*4, 0);
5432 }
5433
5434 /* Init statistics related context */
34f80b04 5435 stats_flags.collect_eth = 1;
a2fbb9ea 5436
66e855f3 5437 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5438 ((u32 *)&stats_flags)[0]);
66e855f3 5439 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5440 ((u32 *)&stats_flags)[1]);
5441
66e855f3 5442 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5443 ((u32 *)&stats_flags)[0]);
66e855f3 5444 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445 ((u32 *)&stats_flags)[1]);
5446
5447 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5448 ((u32 *)&stats_flags)[0]);
5449 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5450 ((u32 *)&stats_flags)[1]);
5451
66e855f3 5452 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5453 ((u32 *)&stats_flags)[0]);
66e855f3 5454 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5455 ((u32 *)&stats_flags)[1]);
5456
5457 REG_WR(bp, BAR_XSTRORM_INTMEM +
5458 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5459 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5460 REG_WR(bp, BAR_XSTRORM_INTMEM +
5461 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5462 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5463
5464 REG_WR(bp, BAR_TSTRORM_INTMEM +
5465 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5466 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5467 REG_WR(bp, BAR_TSTRORM_INTMEM +
5468 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5469 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5470
5471 REG_WR(bp, BAR_USTRORM_INTMEM +
5472 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5473 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5474 REG_WR(bp, BAR_USTRORM_INTMEM +
5475 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5476 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5477
5478 if (CHIP_IS_E1H(bp)) {
5479 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5480 IS_E1HMF(bp));
5481 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5482 IS_E1HMF(bp));
5483 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5484 IS_E1HMF(bp));
5485 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5486 IS_E1HMF(bp));
5487
5488 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5489 bp->e1hov);
5490 }
5491
 5492 	/* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5493 max_agg_size =
5494 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5495 SGE_PAGE_SIZE * PAGES_PER_SGE),
5496 (u32)0xffff);
555f6c78 5497 for_each_rx_queue(bp, i) {
7a9b2557 5498 struct bnx2x_fastpath *fp = &bp->fp[i];
5499
5500 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5501 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5502 U64_LO(fp->rx_comp_mapping));
5503 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5504 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5505 U64_HI(fp->rx_comp_mapping));
5506
5507 /* Next page */
5508 REG_WR(bp, BAR_USTRORM_INTMEM +
5509 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5510 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5511 REG_WR(bp, BAR_USTRORM_INTMEM +
5512 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5513 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5514
7a9b2557 5515 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5516 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5517 max_agg_size);
5518 }
8a1c38d1 5519
5520 /* dropless flow control */
5521 if (CHIP_IS_E1H(bp)) {
5522 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5523
5524 rx_pause.bd_thr_low = 250;
5525 rx_pause.cqe_thr_low = 250;
5526 rx_pause.cos = 1;
5527 rx_pause.sge_thr_low = 0;
5528 rx_pause.bd_thr_high = 350;
5529 rx_pause.cqe_thr_high = 350;
5530 rx_pause.sge_thr_high = 0;
5531
5532 for_each_rx_queue(bp, i) {
5533 struct bnx2x_fastpath *fp = &bp->fp[i];
5534
5535 if (!fp->disable_tpa) {
5536 rx_pause.sge_thr_low = 150;
5537 rx_pause.sge_thr_high = 250;
5538 }
5539
5540
5541 offset = BAR_USTRORM_INTMEM +
5542 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5543 fp->cl_id);
5544 for (j = 0;
5545 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5546 j++)
5547 REG_WR(bp, offset + j*4,
5548 ((u32 *)&rx_pause)[j]);
5549 }
5550 }
5551
5552 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5553
5554 /* Init rate shaping and fairness contexts */
5555 if (IS_E1HMF(bp)) {
5556 int vn;
5557
 5558 		/* During init there is no active link;
 5559 		   until link is up, set the link rate to 10Gbps */
5560 bp->link_vars.line_speed = SPEED_10000;
5561 bnx2x_init_port_minmax(bp);
5562
5563 if (!BP_NOMCP(bp))
5564 bp->mf_config =
5565 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5566 bnx2x_calc_vn_weight_sum(bp);
5567
5568 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5569 bnx2x_init_vn_minmax(bp, 2*vn + port);
5570
5571 /* Enable rate shaping and fairness */
b015e3d1 5572 bp->cmng.flags.cmng_enables |=
8a1c38d1 5573 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5574
5575 } else {
5576 /* rate shaping and fairness are disabled */
5577 DP(NETIF_MSG_IFUP,
5578 "single function mode minmax will be disabled\n");
5579 }
5580
5581
5582 /* Store it to internal memory */
5583 if (bp->port.pmf)
5584 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5585 REG_WR(bp, BAR_XSTRORM_INTMEM +
5586 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5587 ((u32 *)(&bp->cmng))[i]);
5588}
5589
5590static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5591{
5592 switch (load_code) {
5593 case FW_MSG_CODE_DRV_LOAD_COMMON:
5594 bnx2x_init_internal_common(bp);
5595 /* no break */
5596
5597 case FW_MSG_CODE_DRV_LOAD_PORT:
5598 bnx2x_init_internal_port(bp);
5599 /* no break */
5600
5601 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5602 bnx2x_init_internal_func(bp);
5603 break;
5604
5605 default:
5606 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5607 break;
5608 }
5609}
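/* Editor's note: the absent breaks flagged "no break" above are
 * intentional -- the load codes nest, so a COMMON load also performs
 * the PORT and FUNCTION init, and a PORT load also performs the
 * FUNCTION init. */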
5610
5611static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5612{
5613 int i;
5614
5615 for_each_queue(bp, i) {
5616 struct bnx2x_fastpath *fp = &bp->fp[i];
5617
34f80b04 5618 fp->bp = bp;
a2fbb9ea 5619 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5620 fp->index = i;
34f80b04 5621 fp->cl_id = BP_L_ID(bp) + i;
5622#ifdef BCM_CNIC
5623 fp->sb_id = fp->cl_id + 1;
5624#else
34f80b04 5625 fp->sb_id = fp->cl_id;
37b091ba 5626#endif
5627 /* Suitable Rx and Tx SBs are served by the same client */
5628 if (i >= bp->num_rx_queues)
5629 fp->cl_id -= bp->num_rx_queues;
34f80b04 5630 DP(NETIF_MSG_IFUP,
5631 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5632 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5633 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5634 fp->sb_id);
5c862848 5635 bnx2x_update_fpsb_idx(fp);
5636 }
5637
5638 /* ensure status block indices were read */
5639 rmb();
5640
5641
5642 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5643 DEF_SB_ID);
5644 bnx2x_update_dsb_idx(bp);
5645 bnx2x_update_coalesce(bp);
5646 bnx2x_init_rx_rings(bp);
5647 bnx2x_init_tx_ring(bp);
5648 bnx2x_init_sp_ring(bp);
5649 bnx2x_init_context(bp);
471de716 5650 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5651 bnx2x_init_ind_table(bp);
5652 bnx2x_stats_init(bp);
5653
5654 /* At this point, we are ready for interrupts */
5655 atomic_set(&bp->intr_sem, 0);
5656
5657 /* flush all before enabling interrupts */
5658 mb();
5659 mmiowb();
5660
615f8fd9 5661 bnx2x_int_enable(bp);
5662
5663 /* Check for SPIO5 */
5664 bnx2x_attn_int_deasserted0(bp,
5665 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5666 AEU_INPUTS_ATTN_BITS_SPIO5);
5667}
5668
5669/* end of nic init */
5670
5671/*
5672 * gzip service functions
5673 */
5674
5675static int bnx2x_gunzip_init(struct bnx2x *bp)
5676{
5677 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5678 &bp->gunzip_mapping);
5679 if (bp->gunzip_buf == NULL)
5680 goto gunzip_nomem1;
5681
5682 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5683 if (bp->strm == NULL)
5684 goto gunzip_nomem2;
5685
5686 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5687 GFP_KERNEL);
5688 if (bp->strm->workspace == NULL)
5689 goto gunzip_nomem3;
5690
5691 return 0;
5692
5693gunzip_nomem3:
5694 kfree(bp->strm);
5695 bp->strm = NULL;
5696
5697gunzip_nomem2:
5698 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5699 bp->gunzip_mapping);
5700 bp->gunzip_buf = NULL;
5701
5702gunzip_nomem1:
5703 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5704 	       " decompression\n", bp->dev->name);
5705 return -ENOMEM;
5706}
5707
5708static void bnx2x_gunzip_end(struct bnx2x *bp)
5709{
5710 kfree(bp->strm->workspace);
5711
5712 kfree(bp->strm);
5713 bp->strm = NULL;
5714
5715 if (bp->gunzip_buf) {
5716 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5717 bp->gunzip_mapping);
5718 bp->gunzip_buf = NULL;
5719 }
5720}
5721
94a78b79 5722static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5723{
5724 int n, rc;
5725
5726 /* check gzip header */
5727 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5728 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5729 return -EINVAL;
94a78b79 5730 }
5731
5732 n = 10;
5733
34f80b04 5734#define FNAME 0x8
5735
5736 if (zbuf[3] & FNAME)
5737 while ((zbuf[n++] != 0) && (n < len));
5738
94a78b79 5739 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5740 bp->strm->avail_in = len - n;
5741 bp->strm->next_out = bp->gunzip_buf;
5742 bp->strm->avail_out = FW_BUF_SIZE;
5743
5744 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5745 if (rc != Z_OK)
5746 return rc;
5747
5748 rc = zlib_inflate(bp->strm, Z_FINISH);
5749 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5750 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5751 bp->dev->name, bp->strm->msg);
5752
5753 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5754 if (bp->gunzip_outlen & 0x3)
5755 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5756 " gunzip_outlen (%d) not aligned\n",
5757 bp->dev->name, bp->gunzip_outlen);
5758 bp->gunzip_outlen >>= 2;
5759
5760 zlib_inflateEnd(bp->strm);
5761
5762 if (rc == Z_STREAM_END)
5763 return 0;
5764
5765 return rc;
5766}
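For reference, a compact restatement of the header handling above: a gzip member starts with a fixed 10-byte header (magic 0x1f 0x8b, method 8 = deflate, flags in byte 3), an FNAME flag (0x8) means a NUL-terminated file name precedes the payload, and initializing zlib with a negative window size (-MAX_WBITS) inflates the payload as a raw deflate stream. A sketch with the bounds check hoisted ahead of the read:

/* hypothetical helper: offset of the deflate payload in buffer zbuf */
static int gzip_payload_offset(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header */

	if (zbuf[3] & 0x8)		/* FNAME: skip NUL-terminated name */
		while ((n < len) && (zbuf[n++] != 0))
			;
	return n;
}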
5767
5768/* nic load/unload */
5769
5770/*
34f80b04 5771 * General service functions
5772 */
5773
5774/* send a NIG loopback debug packet */
5775static void bnx2x_lb_pckt(struct bnx2x *bp)
5776{
a2fbb9ea 5777 u32 wb_write[3];
5778
5779 /* Ethernet source and destination addresses */
5780 wb_write[0] = 0x55555555;
5781 wb_write[1] = 0x55555555;
34f80b04 5782 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5783 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5784
5785 /* NON-IP protocol */
5786 wb_write[0] = 0x09000000;
5787 wb_write[1] = 0x55555555;
34f80b04 5788 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5789 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5790}
5791
5792/* some of the internal memories
 5793  * are not directly readable from the driver;
5794 * to test them we send debug packets
5795 */
5796static int bnx2x_int_mem_test(struct bnx2x *bp)
5797{
5798 int factor;
5799 int count, i;
5800 u32 val = 0;
5801
ad8d3948 5802 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5803 factor = 120;
5804 else if (CHIP_REV_IS_EMUL(bp))
5805 factor = 200;
5806 else
a2fbb9ea 5807 factor = 1;
5808
5809 DP(NETIF_MSG_HW, "start part1\n");
5810
5811 /* Disable inputs of parser neighbor blocks */
5812 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5813 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5814 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5815 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5816
5817 /* Write 0 to parser credits for CFC search request */
5818 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5819
5820 /* send Ethernet packet */
5821 bnx2x_lb_pckt(bp);
5822
 5823 	/* TODO: do I reset the NIG statistic? */
5824 /* Wait until NIG register shows 1 packet of size 0x10 */
5825 count = 1000 * factor;
5826 while (count) {
34f80b04 5827
5828 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5829 val = *bnx2x_sp(bp, wb_data[0]);
5830 if (val == 0x10)
5831 break;
5832
5833 msleep(10);
5834 count--;
5835 }
5836 if (val != 0x10) {
5837 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5838 return -1;
5839 }
5840
5841 /* Wait until PRS register shows 1 packet */
5842 count = 1000 * factor;
5843 while (count) {
5844 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5845 if (val == 1)
5846 break;
5847
5848 msleep(10);
5849 count--;
5850 }
5851 if (val != 0x1) {
5852 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5853 return -2;
5854 }
5855
5856 /* Reset and init BRB, PRS */
34f80b04 5857 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5858 msleep(50);
34f80b04 5859 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5860 msleep(50);
5861 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5862 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5863
5864 DP(NETIF_MSG_HW, "part2\n");
5865
5866 /* Disable inputs of parser neighbor blocks */
5867 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5868 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5869 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5870 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5871
5872 /* Write 0 to parser credits for CFC search request */
5873 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5874
5875 /* send 10 Ethernet packets */
5876 for (i = 0; i < 10; i++)
5877 bnx2x_lb_pckt(bp);
5878
5879 /* Wait until NIG register shows 10 + 1
5880 packets of size 11*0x10 = 0xb0 */
5881 count = 1000 * factor;
5882 while (count) {
34f80b04 5883
5884 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5885 val = *bnx2x_sp(bp, wb_data[0]);
5886 if (val == 0xb0)
5887 break;
5888
5889 msleep(10);
5890 count--;
5891 }
5892 if (val != 0xb0) {
5893 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5894 return -3;
5895 }
5896
5897 /* Wait until PRS register shows 2 packets */
5898 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5899 if (val != 2)
5900 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5901
5902 /* Write 1 to parser credits for CFC search request */
5903 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5904
5905 /* Wait until PRS register shows 3 packets */
5906 msleep(10 * factor);
5907 /* Wait until NIG register shows 1 packet of size 0x10 */
5908 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5909 if (val != 3)
5910 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5911
5912 /* clear NIG EOP FIFO */
5913 for (i = 0; i < 11; i++)
5914 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5915 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5916 if (val != 1) {
5917 BNX2X_ERR("clear of NIG failed\n");
5918 return -4;
5919 }
5920
5921 /* Reset and init BRB, PRS, NIG */
5922 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5923 msleep(50);
5924 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5925 msleep(50);
5926 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5927 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 5928#ifndef BCM_CNIC
5929 /* set NIC mode */
5930 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5931#endif
5932
5933 /* Enable inputs of parser neighbor blocks */
5934 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5935 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5936 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5937 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5938
5939 DP(NETIF_MSG_HW, "done\n");
5940
5941 return 0; /* OK */
5942}
5943
5944static void enable_blocks_attention(struct bnx2x *bp)
5945{
5946 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5947 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5948 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5949 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5950 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5951 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5952 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5953 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5954 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5955/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5956/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5957 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5958 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5959 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5960/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5961/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5962 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5963 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5964 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5965 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5966/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5967/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5968 if (CHIP_REV_IS_FPGA(bp))
5969 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5970 else
5971 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5972 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5973 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5974 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5975/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5976/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5977 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5978 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5979/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
 5980 	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
5981}
5982
34f80b04 5983
5984static void bnx2x_reset_common(struct bnx2x *bp)
5985{
5986 /* reset_common */
5987 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5988 0xd3ffff7f);
5989 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5990}
5991
5992static void bnx2x_init_pxp(struct bnx2x *bp)
5993{
5994 u16 devctl;
5995 int r_order, w_order;
5996
5997 pci_read_config_word(bp->pdev,
5998 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5999 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6000 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6001 if (bp->mrrs == -1)
6002 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6003 else {
6004 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6005 r_order = bp->mrrs;
6006 }
6007
6008 bnx2x_init_pxp_arb(bp, r_order, w_order);
6009}
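/* Editor's worked example for the decode above, with a hypothetical
 * devctl of 0x2810: PCI_EXP_DEVCTL_PAYLOAD is bits 7:5 and READRQ is
 * bits 14:12, each a 3-bit code meaning 128 << code bytes, so
 *   w_order = (0x2810 & 0x00e0) >> 5  = 0  ->  128-byte max payload
 *   r_order = (0x2810 & 0x7000) >> 12 = 2  ->  512-byte max read req
 * i.e. bnx2x_init_pxp_arb() is handed PCIe size codes, not byte counts. */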
6010
6011static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6012{
6013 u32 val;
6014 u8 port;
6015 u8 is_required = 0;
6016
6017 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6018 SHARED_HW_CFG_FAN_FAILURE_MASK;
6019
6020 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6021 is_required = 1;
6022
6023 /*
6024 * The fan failure mechanism is usually related to the PHY type since
6025 * the power consumption of the board is affected by the PHY. Currently,
 6026 	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6027 */
6028 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6029 for (port = PORT_0; port < PORT_MAX; port++) {
6030 u32 phy_type =
6031 SHMEM_RD(bp, dev_info.port_hw_config[port].
6032 external_phy_config) &
6033 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6034 is_required |=
6035 ((phy_type ==
6036 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6037 (phy_type ==
6038 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6039 (phy_type ==
6040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6041 }
6042
6043 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6044
6045 if (is_required == 0)
6046 return;
6047
6048 /* Fan failure is indicated by SPIO 5 */
6049 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6050 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6051
6052 /* set to active low mode */
6053 val = REG_RD(bp, MISC_REG_SPIO_INT);
6054 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6055 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6056 REG_WR(bp, MISC_REG_SPIO_INT, val);
6057
6058 /* enable interrupt to signal the IGU */
6059 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6060 val |= (1 << MISC_REGISTERS_SPIO_5);
6061 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6062}
6063
34f80b04 6064static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6065{
a2fbb9ea 6066 u32 val, i;
6067#ifdef BCM_CNIC
6068 u32 wb_write[2];
6069#endif
a2fbb9ea 6070
34f80b04 6071 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6072
81f75bbf 6073 bnx2x_reset_common(bp);
6074 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6075 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6076
94a78b79 6077 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6078 if (CHIP_IS_E1H(bp))
6079 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6080
6081 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6082 msleep(30);
6083 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6084
94a78b79 6085 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6086 if (CHIP_IS_E1(bp)) {
6087 /* enable HW interrupt from PXP on USDM overflow
6088 bit 16 on INT_MASK_0 */
6089 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6090 }
a2fbb9ea 6091
94a78b79 6092 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6093 bnx2x_init_pxp(bp);
6094
6095#ifdef __BIG_ENDIAN
6096 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6097 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6098 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6099 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6100 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6101 /* make sure this value is 0 */
6102 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6103
6104/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6105 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6106 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6107 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6108 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6109#endif
6110
34f80b04 6111 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6112#ifdef BCM_CNIC
34f80b04
EG
6113 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6114 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6115 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6116#endif
6117
6118 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6119 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6120
 6121 	/* let the HW do its magic ... */
6122 msleep(100);
6123 /* finish PXP init */
6124 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6125 if (val != 1) {
6126 BNX2X_ERR("PXP2 CFG failed\n");
6127 return -EBUSY;
6128 }
6129 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6130 if (val != 1) {
6131 BNX2X_ERR("PXP2 RD_INIT failed\n");
6132 return -EBUSY;
6133 }
a2fbb9ea 6134
6135 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6136 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6137
94a78b79 6138 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6139
6140 /* clean the DMAE memory */
6141 bp->dmae_ready = 1;
6142 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6143
6144 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6147 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6148
6149 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6150 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6151 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6152 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6153
94a78b79 6154 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6155
6156#ifdef BCM_CNIC
6157 wb_write[0] = 0;
6158 wb_write[1] = 0;
6159 for (i = 0; i < 64; i++) {
6160 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6161 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6162
6163 if (CHIP_IS_E1H(bp)) {
6164 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6165 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6166 wb_write, 2);
6167 }
6168 }
6169#endif
6170 /* soft reset pulse */
6171 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6172 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6173
37b091ba 6174#ifdef BCM_CNIC
94a78b79 6175 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6176#endif
a2fbb9ea 6177
94a78b79 6178 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6179 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6180 if (!CHIP_REV_IS_SLOW(bp)) {
6181 /* enable hw interrupt from doorbell Q */
6182 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6183 }
a2fbb9ea 6184
6185 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6186 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6187 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6188#ifndef BCM_CNIC
6189 /* set NIC mode */
6190 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6191#endif
6192 if (CHIP_IS_E1H(bp))
6193 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6194
6195 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6196 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6197 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6198 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6199
6200 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6201 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6202 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6203 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6204
6205 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6206 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6207 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6208 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6209
6210 /* sync semi rtc */
6211 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6212 0x80000000);
6213 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6214 0x80000000);
a2fbb9ea 6215
6216 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6217 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6218 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6219
6220 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6221 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6222 REG_WR(bp, i, 0xc0cac01a);
6223 /* TODO: replace with something meaningful */
6224 }
94a78b79 6225 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6226#ifdef BCM_CNIC
6227 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6228 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6229 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6230 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6231 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6232 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6237#endif
34f80b04 6238 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6239
6240 if (sizeof(union cdu_context) != 1024)
6241 /* we currently assume that a context is 1024 bytes */
6242 printk(KERN_ALERT PFX "please adjust the size of"
6243 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6244
94a78b79 6245 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6246 val = (4 << 24) + (0 << 12) + 1024;
6247 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6248
94a78b79 6249 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6250 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6251 /* enable context validation interrupt from CFC */
6252 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6253
6254 /* set the thresholds to prevent CFC/CDU race */
6255 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6256
6257 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6258 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6259
94a78b79 6260 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6261 /* Reset PCIE errors for debug */
6262 REG_WR(bp, 0x2814, 0xffffffff);
6263 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6264
94a78b79 6265 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6266 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6267 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6268 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6269
94a78b79 6270 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6271 if (CHIP_IS_E1H(bp)) {
6272 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6273 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6274 }
6275
6276 if (CHIP_REV_IS_SLOW(bp))
6277 msleep(200);
6278
6279 /* finish CFC init */
6280 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6281 if (val != 1) {
6282 BNX2X_ERR("CFC LL_INIT failed\n");
6283 return -EBUSY;
6284 }
6285 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6286 if (val != 1) {
6287 BNX2X_ERR("CFC AC_INIT failed\n");
6288 return -EBUSY;
6289 }
6290 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6291 if (val != 1) {
6292 BNX2X_ERR("CFC CAM_INIT failed\n");
6293 return -EBUSY;
6294 }
6295 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6296
6297 /* read NIG statistic
6298 to see if this is our first up since powerup */
6299 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6300 val = *bnx2x_sp(bp, wb_data[0]);
6301
6302 /* do internal memory self test */
6303 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6304 BNX2X_ERR("internal mem self test failed\n");
6305 return -EBUSY;
6306 }
6307
35b19ba5 6308 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6309 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6310 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6312 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6313 bp->port.need_hw_lock = 1;
6314 break;
6315
6316 default:
6317 break;
6318 }
f1410647 6319
6320 bnx2x_setup_fan_failure_detection(bp);
6321
6322 /* clear PXP2 attentions */
6323 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6324
34f80b04 6325 enable_blocks_attention(bp);
a2fbb9ea 6326
6327 if (!BP_NOMCP(bp)) {
6328 bnx2x_acquire_phy_lock(bp);
6329 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6330 bnx2x_release_phy_lock(bp);
6331 } else
6332 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6333
6334 return 0;
6335}
a2fbb9ea 6336
6337static int bnx2x_init_port(struct bnx2x *bp)
6338{
6339 int port = BP_PORT(bp);
94a78b79 6340 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6341 u32 low, high;
34f80b04 6342 u32 val;
a2fbb9ea 6343
6344 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6345
6346 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6347
94a78b79 6348 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6349 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6350
6351 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6352 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6353 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6354 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6355
6356#ifdef BCM_CNIC
6357 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6358
94a78b79 6359 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6360 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6361 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6362#endif
94a78b79 6363 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6364
94a78b79 6365 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6366 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6367 /* no pause for emulation and FPGA */
6368 low = 0;
6369 high = 513;
6370 } else {
6371 if (IS_E1HMF(bp))
6372 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6373 else if (bp->dev->mtu > 4096) {
6374 if (bp->flags & ONE_PORT_FLAG)
6375 low = 160;
6376 else {
6377 val = bp->dev->mtu;
6378 /* (24*1024 + val*4)/256 */
6379 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6380 }
6381 } else
6382 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6383 high = low + 56; /* 14*1024/256 */
6384 }
6385 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6386 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6387
6388
94a78b79 6389 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6390
94a78b79 6391 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6392 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6393 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6394 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6395
6396 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6397 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6398 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6399 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6400
94a78b79 6401 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6402 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6403
94a78b79 6404 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6405
6406 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6407 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6408
6409 /* update threshold */
34f80b04 6410 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6411 /* update init credit */
34f80b04 6412 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6413
6414 /* probe changes */
34f80b04 6415 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6416 msleep(5);
34f80b04 6417 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6418
6419#ifdef BCM_CNIC
6420 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6421#endif
94a78b79 6422 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6423 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6424
6425 if (CHIP_IS_E1(bp)) {
6426 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6427 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6428 }
94a78b79 6429 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6430
94a78b79 6431 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6432 /* init aeu_mask_attn_func_0/1:
6433 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6434 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6435 * bits 4-7 are used for "per vn group attention" */
6436 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6437 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6438
94a78b79 6439 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6440 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6441 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6442 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6443 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6444
94a78b79 6445 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6446
6447 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6448
6449 if (CHIP_IS_E1H(bp)) {
6450 /* 0x2 disable e1hov, 0x1 enable */
6451 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6452 (IS_E1HMF(bp) ? 0x1 : 0x2));
6453
6454 {
6455 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6456 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6457 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6458 }
6459 }
6460
94a78b79 6461 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6462 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6463
35b19ba5 6464 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6465 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6466 {
6467 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6468
6469 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6470 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6471
6472 /* The GPIO should be swapped if the swap register is
6473 set and active */
6474 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6475 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6476
6477 /* Select function upon port-swap configuration */
6478 if (port == 0) {
6479 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6480 aeu_gpio_mask = (swap_val && swap_override) ?
6481 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6482 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6483 } else {
6484 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6485 aeu_gpio_mask = (swap_val && swap_override) ?
6486 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6487 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6488 }
6489 val = REG_RD(bp, offset);
6490 /* add GPIO3 to group */
6491 val |= aeu_gpio_mask;
6492 REG_WR(bp, offset, val);
6493 }
6494 break;
6495
35b19ba5 6496 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6497 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6498 /* add SPIO 5 to group 0 */
6499 {
6500 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6501 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6502 val = REG_RD(bp, reg_addr);
f1410647 6503 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6504 REG_WR(bp, reg_addr, val);
6505 }
6506 break;
6507
6508 default:
6509 break;
6510 }
6511
c18487ee 6512 bnx2x__link_reset(bp);
a2fbb9ea 6513
6514 return 0;
6515}
6516
6517#define ILT_PER_FUNC (768/2)
6518#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
 6519/* the phys address is shifted right 12 bits and has a
 6520   1=valid bit added as the 53rd bit;
 6521   then, since this is a wide register(TM),
 6522   we split it into two 32-bit writes
6523 */
6524#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6525#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6526#define PXP_ONE_ILT(x) (((x) << 10) | x)
6527#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6528
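/* Editor's worked example for ONCHIP_ADDR1/2, taking a hypothetical
 * page-aligned DMA address x = 0x123456789000:
 *   x >> 12         = 0x123456789   (the page number)
 *   ONCHIP_ADDR1(x) = 0x23456789    (its low 32 bits)
 *   ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44) = 0x100001
 * together the two writes form (1 << 52) | (x >> 12) in the wide
 * register -- the 1 in bit 52 is the valid bit mentioned above. */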
6529#ifdef BCM_CNIC
6530#define CNIC_ILT_LINES 127
6531#define CNIC_CTX_PER_ILT 16
6532#else
34f80b04 6533#define CNIC_ILT_LINES 0
37b091ba 6534#endif
6535
6536static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6537{
6538 int reg;
6539
6540 if (CHIP_IS_E1H(bp))
6541 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6542 else /* E1 */
6543 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6544
6545 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6546}
6547
6548static int bnx2x_init_func(struct bnx2x *bp)
6549{
6550 int port = BP_PORT(bp);
6551 int func = BP_FUNC(bp);
8badd27a 6552 u32 addr, val;
6553 int i;
6554
6555 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6556
6557 /* set MSI reconfigure capability */
6558 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6559 val = REG_RD(bp, addr);
6560 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6561 REG_WR(bp, addr, val);
6562
6563 i = FUNC_ILT_BASE(func);
6564
6565 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6566 if (CHIP_IS_E1H(bp)) {
6567 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6568 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6569 } else /* E1 */
6570 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6571 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6572
6573#ifdef BCM_CNIC
6574 i += 1 + CNIC_ILT_LINES;
6575 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6576 if (CHIP_IS_E1(bp))
6577 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6578 else {
6579 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6580 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6581 }
6582
6583 i++;
6584 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6585 if (CHIP_IS_E1(bp))
6586 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6587 else {
6588 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6589 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6590 }
6591
6592 i++;
6593 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6594 if (CHIP_IS_E1(bp))
6595 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6596 else {
6597 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6598 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6599 }
6600
6601 /* tell the searcher where the T2 table is */
6602 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6603
6604 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6605 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6606
6607 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6608 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6609 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6610
6611 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6612#endif
6613
6614 if (CHIP_IS_E1H(bp)) {
6615 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6616 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6617 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6618 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6619 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6620 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6621 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6622 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6623 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6624
6625 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6626 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6627 }
6628
6629 /* HC init per function */
6630 if (CHIP_IS_E1H(bp)) {
6631 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6632
6633 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6635 }
94a78b79 6636 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6637
c14423fe 6638 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6639 REG_WR(bp, 0x2114, 0xffffffff);
6640 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6641
34f80b04
EG
6642 return 0;
6643}
6644
6645static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6646{
6647 int i, rc = 0;
a2fbb9ea 6648
34f80b04
EG
6649 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6650 BP_FUNC(bp), load_code);
a2fbb9ea 6651
34f80b04
EG
6652 bp->dmae_ready = 0;
6653 mutex_init(&bp->dmae_mutex);
54016b26
EG
6654 rc = bnx2x_gunzip_init(bp);
6655 if (rc)
6656 return rc;
a2fbb9ea 6657
34f80b04
EG
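	/* the cases below deliberately fall through: a COMMON load also
	 * performs PORT and FUNCTION init, a PORT load also performs
	 * FUNCTION init */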
6658 switch (load_code) {
6659 case FW_MSG_CODE_DRV_LOAD_COMMON:
6660 rc = bnx2x_init_common(bp);
6661 if (rc)
6662 goto init_hw_err;
6663 /* no break */
6664
6665 case FW_MSG_CODE_DRV_LOAD_PORT:
6666 bp->dmae_ready = 1;
6667 rc = bnx2x_init_port(bp);
6668 if (rc)
6669 goto init_hw_err;
6670 /* no break */
6671
6672 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6673 bp->dmae_ready = 1;
6674 rc = bnx2x_init_func(bp);
6675 if (rc)
6676 goto init_hw_err;
6677 break;
6678
6679 default:
6680 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6681 break;
6682 }
6683
6684 if (!BP_NOMCP(bp)) {
6685 int func = BP_FUNC(bp);
a2fbb9ea
ET
6686
6687 bp->fw_drv_pulse_wr_seq =
34f80b04 6688 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6689 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6690 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6691 }
a2fbb9ea 6692
34f80b04
EG
6693 /* this needs to be done before gunzip end */
6694 bnx2x_zero_def_sb(bp);
6695 for_each_queue(bp, i)
6696 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
6697#ifdef BCM_CNIC
6698 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6699#endif
34f80b04
EG
6700
6701init_hw_err:
6702 bnx2x_gunzip_end(bp);
6703
6704 return rc;
a2fbb9ea
ET
6705}
6706
a2fbb9ea
ET
6707static void bnx2x_free_mem(struct bnx2x *bp)
6708{
6709
6710#define BNX2X_PCI_FREE(x, y, size) \
6711 do { \
6712 if (x) { \
6713 pci_free_consistent(bp->pdev, size, x, y); \
6714 x = NULL; \
6715 y = 0; \
6716 } \
6717 } while (0)
6718
6719#define BNX2X_FREE(x) \
6720 do { \
6721 if (x) { \
6722 vfree(x); \
6723 x = NULL; \
6724 } \
6725 } while (0)
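/* BNX2X_PCI_FREE releases DMA-coherent blocks obtained with
 * pci_alloc_consistent(), BNX2X_FREE releases vmalloc()ed host-only
 * rings; both NULL the pointer so a repeated call is harmless */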
6726
6727 int i;
6728
6729 /* fastpath */
555f6c78 6730 /* Common */
a2fbb9ea
ET
6731 for_each_queue(bp, i) {
6732
555f6c78 6733 /* status blocks */
a2fbb9ea
ET
6734 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6735 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6736 sizeof(struct host_status_block));
555f6c78
EG
6737 }
6738 /* Rx */
6739 for_each_rx_queue(bp, i) {
a2fbb9ea 6740
555f6c78 6741 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6742 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6743 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6744 bnx2x_fp(bp, i, rx_desc_mapping),
6745 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6746
6747 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6748 bnx2x_fp(bp, i, rx_comp_mapping),
6749 sizeof(struct eth_fast_path_rx_cqe) *
6750 NUM_RCQ_BD);
a2fbb9ea 6751
7a9b2557 6752 /* SGE ring */
32626230 6753 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6754 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6755 bnx2x_fp(bp, i, rx_sge_mapping),
6756 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6757 }
555f6c78
EG
6758 /* Tx */
6759 for_each_tx_queue(bp, i) {
6760
6761 /* fastpath tx rings: tx_buf tx_desc */
6762 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6764 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6765 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6766 }
a2fbb9ea
ET
6767 /* end of fastpath */
6768
6769 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6770 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6771
6772 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6773 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6774
37b091ba 6775#ifdef BCM_CNIC
a2fbb9ea
ET
6776 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6778 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6779 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
6780 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6781 sizeof(struct host_status_block));
a2fbb9ea 6782#endif
7a9b2557 6783 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6784
6785#undef BNX2X_PCI_FREE
 6786#undef BNX2X_FREE
6787}
6788
6789static int bnx2x_alloc_mem(struct bnx2x *bp)
6790{
6791
6792#define BNX2X_PCI_ALLOC(x, y, size) \
6793 do { \
6794 x = pci_alloc_consistent(bp->pdev, size, y); \
6795 if (x == NULL) \
6796 goto alloc_mem_err; \
6797 memset(x, 0, size); \
6798 } while (0)
6799
6800#define BNX2X_ALLOC(x, size) \
6801 do { \
6802 x = vmalloc(size); \
6803 if (x == NULL) \
6804 goto alloc_mem_err; \
6805 memset(x, 0, size); \
6806 } while (0)
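/* mirror images of the free macros above: BNX2X_PCI_ALLOC returns a
 * zeroed DMA-coherent block, BNX2X_ALLOC a zeroed vmalloc() area;
 * both jump to alloc_mem_err on failure */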
6807
6808 int i;
6809
6810 /* fastpath */
555f6c78 6811 /* Common */
a2fbb9ea
ET
6812 for_each_queue(bp, i) {
6813 bnx2x_fp(bp, i, bp) = bp;
6814
555f6c78 6815 /* status blocks */
a2fbb9ea
ET
6816 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6817 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6818 sizeof(struct host_status_block));
555f6c78
EG
6819 }
6820 /* Rx */
6821 for_each_rx_queue(bp, i) {
a2fbb9ea 6822
555f6c78 6823 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6824 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6825 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6826 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6827 &bnx2x_fp(bp, i, rx_desc_mapping),
6828 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6829
6830 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6831 &bnx2x_fp(bp, i, rx_comp_mapping),
6832 sizeof(struct eth_fast_path_rx_cqe) *
6833 NUM_RCQ_BD);
6834
7a9b2557
VZ
6835 /* SGE ring */
6836 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6837 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6838 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6839 &bnx2x_fp(bp, i, rx_sge_mapping),
6840 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6841 }
555f6c78
EG
6842 /* Tx */
6843 for_each_tx_queue(bp, i) {
6844
555f6c78
EG
6845 /* fastpath tx rings: tx_buf tx_desc */
6846 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6847 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6848 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6849 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6850 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6851 }
a2fbb9ea
ET
6852 /* end of fastpath */
6853
6854 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6855 sizeof(struct host_def_status_block));
6856
6857 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6858 sizeof(struct bnx2x_slowpath));
6859
37b091ba 6860#ifdef BCM_CNIC
a2fbb9ea
ET
6861 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6862
a2fbb9ea
ET
 6863 /* allocate the searcher T2 table;
 6864 we allocate 1/4 of the alloc num for T2
6865 (which is not entered into the ILT) */
6866 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6867
37b091ba 6868 /* Initialize T2 (for 1024 connections) */
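	/* each 64-byte T2 element holds the bus address of the next
	 * element in its last 8 bytes, chaining the table into a free
	 * list for the searcher */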
a2fbb9ea 6869 for (i = 0; i < 16*1024; i += 64)
37b091ba 6870 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 6871
37b091ba 6872 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
6873 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6874
6875 /* QM queues (128*MAX_CONN) */
6876 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
6877
6878 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6879 sizeof(struct host_status_block));
a2fbb9ea
ET
6880#endif
6881
6882 /* Slow path ring */
6883 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6884
6885 return 0;
6886
6887alloc_mem_err:
6888 bnx2x_free_mem(bp);
6889 return -ENOMEM;
6890
6891#undef BNX2X_PCI_ALLOC
6892#undef BNX2X_ALLOC
6893}
6894
6895static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6896{
6897 int i;
6898
555f6c78 6899 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6900 struct bnx2x_fastpath *fp = &bp->fp[i];
6901
6902 u16 bd_cons = fp->tx_bd_cons;
6903 u16 sw_prod = fp->tx_pkt_prod;
6904 u16 sw_cons = fp->tx_pkt_cons;
6905
a2fbb9ea
ET
6906 while (sw_cons != sw_prod) {
6907 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6908 sw_cons++;
6909 }
6910 }
6911}
6912
6913static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6914{
6915 int i, j;
6916
555f6c78 6917 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6918 struct bnx2x_fastpath *fp = &bp->fp[j];
6919
a2fbb9ea
ET
6920 for (i = 0; i < NUM_RX_BD; i++) {
6921 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6922 struct sk_buff *skb = rx_buf->skb;
6923
6924 if (skb == NULL)
6925 continue;
6926
6927 pci_unmap_single(bp->pdev,
6928 pci_unmap_addr(rx_buf, mapping),
356e2385 6929 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6930
6931 rx_buf->skb = NULL;
6932 dev_kfree_skb(skb);
6933 }
7a9b2557 6934 if (!fp->disable_tpa)
32626230
EG
6935 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6936 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6937 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6938 }
6939}
6940
6941static void bnx2x_free_skbs(struct bnx2x *bp)
6942{
6943 bnx2x_free_tx_skbs(bp);
6944 bnx2x_free_rx_skbs(bp);
6945}
6946
6947static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6948{
34f80b04 6949 int i, offset = 1;
a2fbb9ea
ET
6950
6951 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6952 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6953 bp->msix_table[0].vector);
6954
37b091ba
MC
6955#ifdef BCM_CNIC
6956 offset++;
6957#endif
a2fbb9ea 6958 for_each_queue(bp, i) {
c14423fe 6959 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6960 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6961 bnx2x_fp(bp, i, state));
6962
34f80b04 6963 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6964 }
a2fbb9ea
ET
6965}
6966
6967static void bnx2x_free_irq(struct bnx2x *bp)
6968{
a2fbb9ea 6969 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6970 bnx2x_free_msix_irqs(bp);
6971 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6972 bp->flags &= ~USING_MSIX_FLAG;
6973
8badd27a
EG
6974 } else if (bp->flags & USING_MSI_FLAG) {
6975 free_irq(bp->pdev->irq, bp->dev);
6976 pci_disable_msi(bp->pdev);
6977 bp->flags &= ~USING_MSI_FLAG;
6978
a2fbb9ea
ET
6979 } else
6980 free_irq(bp->pdev->irq, bp->dev);
6981}
6982
6983static int bnx2x_enable_msix(struct bnx2x *bp)
6984{
8badd27a
EG
6985 int i, rc, offset = 1;
6986 int igu_vec = 0;
a2fbb9ea 6987
8badd27a
EG
6988 bp->msix_table[0].entry = igu_vec;
6989 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6990
37b091ba
MC
6991#ifdef BCM_CNIC
6992 igu_vec = BP_L_ID(bp) + offset;
6993 bp->msix_table[1].entry = igu_vec;
6994 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6995 offset++;
6996#endif
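	/* vector layout: entry 0 - slowpath, entry 1 - CNIC (only when
	 * BCM_CNIC is defined), then one entry per fastpath queue */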
34f80b04 6997 for_each_queue(bp, i) {
8badd27a 6998 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6999 bp->msix_table[i + offset].entry = igu_vec;
7000 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7001 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7002 }
7003
34f80b04 7004 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7005 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 7006 if (rc) {
8badd27a
EG
7007 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7008 return rc;
34f80b04 7009 }
8badd27a 7010
a2fbb9ea
ET
7011 bp->flags |= USING_MSIX_FLAG;
7012
7013 return 0;
a2fbb9ea
ET
7014}
7015
a2fbb9ea
ET
7016static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7017{
34f80b04 7018 int i, rc, offset = 1;
a2fbb9ea 7019
a2fbb9ea
ET
7020 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7021 bp->dev->name, bp->dev);
a2fbb9ea
ET
7022 if (rc) {
7023 BNX2X_ERR("request sp irq failed\n");
7024 return -EBUSY;
7025 }
7026
37b091ba
MC
7027#ifdef BCM_CNIC
7028 offset++;
7029#endif
a2fbb9ea 7030 for_each_queue(bp, i) {
555f6c78
EG
7031 struct bnx2x_fastpath *fp = &bp->fp[i];
7032
ca00392c
EG
7033 if (i < bp->num_rx_queues)
7034 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7035 else
7036 sprintf(fp->name, "%s-tx-%d",
7037 bp->dev->name, i - bp->num_rx_queues);
7038
34f80b04 7039 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7040 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7041 if (rc) {
555f6c78 7042 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7043 bnx2x_free_msix_irqs(bp);
7044 return -EBUSY;
7045 }
7046
555f6c78 7047 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7048 }
7049
555f6c78 7050 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
7051 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7052 " ... fp[%d] %d\n",
7053 bp->dev->name, bp->msix_table[0].vector,
7054 0, bp->msix_table[offset].vector,
7055 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7056
a2fbb9ea 7057 return 0;
a2fbb9ea
ET
7058}
7059
8badd27a
EG
7060static int bnx2x_enable_msi(struct bnx2x *bp)
7061{
7062 int rc;
7063
7064 rc = pci_enable_msi(bp->pdev);
7065 if (rc) {
7066 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7067 return -1;
7068 }
7069 bp->flags |= USING_MSI_FLAG;
7070
7071 return 0;
7072}
7073
a2fbb9ea
ET
7074static int bnx2x_req_irq(struct bnx2x *bp)
7075{
8badd27a 7076 unsigned long flags;
34f80b04 7077 int rc;
a2fbb9ea 7078
8badd27a
EG
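	/* an MSI vector is exclusive to the device, only a legacy INTx
	 * line may be shared */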
7079 if (bp->flags & USING_MSI_FLAG)
7080 flags = 0;
7081 else
7082 flags = IRQF_SHARED;
7083
7084 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7085 bp->dev->name, bp->dev);
a2fbb9ea
ET
7086 if (!rc)
7087 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7088
7089 return rc;
a2fbb9ea
ET
7090}
7091
65abd74d
YG
7092static void bnx2x_napi_enable(struct bnx2x *bp)
7093{
7094 int i;
7095
555f6c78 7096 for_each_rx_queue(bp, i)
65abd74d
YG
7097 napi_enable(&bnx2x_fp(bp, i, napi));
7098}
7099
7100static void bnx2x_napi_disable(struct bnx2x *bp)
7101{
7102 int i;
7103
555f6c78 7104 for_each_rx_queue(bp, i)
65abd74d
YG
7105 napi_disable(&bnx2x_fp(bp, i, napi));
7106}
7107
7108static void bnx2x_netif_start(struct bnx2x *bp)
7109{
e1510706
EG
7110 int intr_sem;
7111
7112 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7113 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7114
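	/* atomic_dec_and_test() only returns true when intr_sem reaches
	 * zero, so NAPI and the Tx queues are restarted only once no
	 * interrupt-disable request is pending */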
7115 if (intr_sem) {
65abd74d 7116 if (netif_running(bp->dev)) {
65abd74d
YG
7117 bnx2x_napi_enable(bp);
7118 bnx2x_int_enable(bp);
555f6c78
EG
7119 if (bp->state == BNX2X_STATE_OPEN)
7120 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7121 }
7122 }
7123}
7124
f8ef6e44 7125static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7126{
f8ef6e44 7127 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7128 bnx2x_napi_disable(bp);
762d5f6c
EG
7129 netif_tx_disable(bp->dev);
7130 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
7131}
7132
a2fbb9ea
ET
7133/*
7134 * Init service functions
7135 */
7136
e665bfda
MC
7137/**
7138 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7139 *
7140 * @param bp driver descriptor
7141 * @param set set or clear an entry (1 or 0)
7142 * @param mac pointer to a buffer containing a MAC
7143 * @param cl_bit_vec bit vector of clients to register a MAC for
7144 * @param cam_offset offset in a CAM to use
7145 * @param with_bcast set broadcast MAC as well
7146 */
7147static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7148 u32 cl_bit_vec, u8 cam_offset,
7149 u8 with_bcast)
a2fbb9ea
ET
7150{
7151 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7152 int port = BP_PORT(bp);
a2fbb9ea
ET
7153
7154 /* CAM allocation
7155 * unicasts 0-31:port0 32-63:port1
7156 * multicast 64-127:port0 128-191:port1
7157 */
e665bfda
MC
7158 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7159 config->hdr.offset = cam_offset;
7160 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7161 config->hdr.reserved1 = 0;
7162
7163 /* primary MAC */
7164 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7165 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7166 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7167 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7168 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7169 swab16(*(u16 *)&mac[4]);
34f80b04 7170 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7171 if (set)
7172 config->config_table[0].target_table_entry.flags = 0;
7173 else
7174 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7175 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7176 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7177 config->config_table[0].target_table_entry.vlan_id = 0;
7178
3101c2bc
YG
7179 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7180 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7181 config->config_table[0].cam_entry.msb_mac_addr,
7182 config->config_table[0].cam_entry.middle_mac_addr,
7183 config->config_table[0].cam_entry.lsb_mac_addr);
7184
7185 /* broadcast */
e665bfda
MC
7186 if (with_bcast) {
7187 config->config_table[1].cam_entry.msb_mac_addr =
7188 cpu_to_le16(0xffff);
7189 config->config_table[1].cam_entry.middle_mac_addr =
7190 cpu_to_le16(0xffff);
7191 config->config_table[1].cam_entry.lsb_mac_addr =
7192 cpu_to_le16(0xffff);
7193 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7194 if (set)
7195 config->config_table[1].target_table_entry.flags =
7196 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7197 else
7198 CAM_INVALIDATE(config->config_table[1]);
7199 config->config_table[1].target_table_entry.clients_bit_vector =
7200 cpu_to_le32(cl_bit_vec);
7201 config->config_table[1].target_table_entry.vlan_id = 0;
7202 }
a2fbb9ea
ET
7203
7204 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7205 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7206 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7207}
7208
e665bfda
MC
7209/**
7210 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7211 *
7212 * @param bp driver descriptor
7213 * @param set set or clear an entry (1 or 0)
7214 * @param mac pointer to a buffer containing a MAC
7215 * @param cl_bit_vec bit vector of clients to register a MAC for
7216 * @param cam_offset offset in a CAM to use
7217 */
7218static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7219 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7220{
7221 struct mac_configuration_cmd_e1h *config =
7222 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7223
8d9c5f34 7224 config->hdr.length = 1;
e665bfda
MC
7225 config->hdr.offset = cam_offset;
7226 config->hdr.client_id = 0xff;
34f80b04
EG
7227 config->hdr.reserved1 = 0;
7228
7229 /* primary MAC */
7230 config->config_table[0].msb_mac_addr =
e665bfda 7231 swab16(*(u16 *)&mac[0]);
34f80b04 7232 config->config_table[0].middle_mac_addr =
e665bfda 7233 swab16(*(u16 *)&mac[2]);
34f80b04 7234 config->config_table[0].lsb_mac_addr =
e665bfda 7235 swab16(*(u16 *)&mac[4]);
ca00392c 7236 config->config_table[0].clients_bit_vector =
e665bfda 7237 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7238 config->config_table[0].vlan_id = 0;
7239 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7240 if (set)
7241 config->config_table[0].flags = BP_PORT(bp);
7242 else
7243 config->config_table[0].flags =
7244 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7245
e665bfda 7246 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7247 (set ? "setting" : "clearing"),
34f80b04
EG
7248 config->config_table[0].msb_mac_addr,
7249 config->config_table[0].middle_mac_addr,
e665bfda 7250 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7251
7252 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7253 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7254 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7255}
7256
a2fbb9ea
ET
7257static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7258 int *state_p, int poll)
7259{
7260 /* can take a while if any port is running */
8b3a0f0b 7261 int cnt = 5000;
a2fbb9ea 7262
c14423fe
ET
7263 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7264 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7265
7266 might_sleep();
34f80b04 7267 while (cnt--) {
a2fbb9ea
ET
7268 if (poll) {
7269 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7270 /* if index is different from 0
7271 * the reply for some commands will
3101c2bc 7272 * be on the non default queue
a2fbb9ea
ET
7273 */
7274 if (idx)
7275 bnx2x_rx_int(&bp->fp[idx], 10);
7276 }
a2fbb9ea 7277
3101c2bc 7278 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7279 if (*state_p == state) {
7280#ifdef BNX2X_STOP_ON_ERROR
7281 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7282#endif
a2fbb9ea 7283 return 0;
8b3a0f0b 7284 }
a2fbb9ea 7285
a2fbb9ea 7286 msleep(1);
e3553b29
EG
7287
7288 if (bp->panic)
7289 return -EIO;
a2fbb9ea
ET
7290 }
7291
a2fbb9ea 7292 /* timeout! */
49d66772
ET
7293 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7294 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7295#ifdef BNX2X_STOP_ON_ERROR
7296 bnx2x_panic();
7297#endif
a2fbb9ea 7298
49d66772 7299 return -EBUSY;
a2fbb9ea
ET
7300}
7301
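/* the helpers below bump set_mac_pending, post a SET_MAC ramrod through
 * the *_gen() routines above and then wait for the sp event handler to
 * bring set_mac_pending back to zero (in polling mode when clearing) */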
e665bfda
MC
7302static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7303{
7304 bp->set_mac_pending++;
7305 smp_wmb();
7306
7307 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7308 (1 << bp->fp->cl_id), BP_FUNC(bp));
7309
7310 /* Wait for a completion */
7311 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7312}
7313
7314static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7315{
7316 bp->set_mac_pending++;
7317 smp_wmb();
7318
7319 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7320 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7321 1);
7322
7323 /* Wait for a completion */
7324 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7325}
7326
993ac7b5
MC
7327#ifdef BCM_CNIC
7328/**
 7329 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 7330 * MAC(s). This function will wait until the ramrod completion
7331 * returns.
7332 *
7333 * @param bp driver handle
7334 * @param set set or clear the CAM entry
7335 *
 7336 * @return 0 on success, -ENODEV if the ramrod doesn't return.
7337 */
7338static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7339{
7340 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7341
7342 bp->set_mac_pending++;
7343 smp_wmb();
7344
7345 /* Send a SET_MAC ramrod */
7346 if (CHIP_IS_E1(bp))
7347 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7348 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7349 1);
7350 else
7351 /* CAM allocation for E1H
7352 * unicasts: by func number
7353 * multicast: 20+FUNC*20, 20 each
7354 */
7355 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7356 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7357
7358 /* Wait for a completion when setting */
7359 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7360
7361 return 0;
7362}
7363#endif
7364
a2fbb9ea
ET
7365static int bnx2x_setup_leading(struct bnx2x *bp)
7366{
34f80b04 7367 int rc;
a2fbb9ea 7368
c14423fe 7369 /* reset IGU state */
34f80b04 7370 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7371
7372 /* SETUP ramrod */
7373 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7374
34f80b04
EG
7375 /* Wait for completion */
7376 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7377
34f80b04 7378 return rc;
a2fbb9ea
ET
7379}
7380
7381static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7382{
555f6c78
EG
7383 struct bnx2x_fastpath *fp = &bp->fp[index];
7384
a2fbb9ea 7385 /* reset IGU state */
555f6c78 7386 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7387
228241eb 7388 /* SETUP ramrod */
555f6c78
EG
7389 fp->state = BNX2X_FP_STATE_OPENING;
7390 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7391 fp->cl_id, 0);
a2fbb9ea
ET
7392
7393 /* Wait for completion */
7394 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7395 &(fp->state), 0);
a2fbb9ea
ET
7396}
7397
a2fbb9ea 7398static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7399
ca00392c
EG
7400static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7401 int *num_tx_queues_out)
7402{
7403 int _num_rx_queues = 0, _num_tx_queues = 0;
7404
7405 switch (bp->multi_mode) {
7406 case ETH_RSS_MODE_DISABLED:
7407 _num_rx_queues = 1;
7408 _num_tx_queues = 1;
7409 break;
7410
7411 case ETH_RSS_MODE_REGULAR:
7412 if (num_rx_queues)
7413 _num_rx_queues = min_t(u32, num_rx_queues,
7414 BNX2X_MAX_QUEUES(bp));
7415 else
7416 _num_rx_queues = min_t(u32, num_online_cpus(),
7417 BNX2X_MAX_QUEUES(bp));
7418
7419 if (num_tx_queues)
7420 _num_tx_queues = min_t(u32, num_tx_queues,
7421 BNX2X_MAX_QUEUES(bp));
7422 else
7423 _num_tx_queues = min_t(u32, num_online_cpus(),
7424 BNX2X_MAX_QUEUES(bp));
7425
 7426 /* There must not be more Tx queues than Rx queues */
7427 if (_num_tx_queues > _num_rx_queues) {
7428 BNX2X_ERR("number of tx queues (%d) > "
7429 "number of rx queues (%d)"
7430 " defaulting to %d\n",
7431 _num_tx_queues, _num_rx_queues,
7432 _num_rx_queues);
7433 _num_tx_queues = _num_rx_queues;
7434 }
7435 break;
 7436
7438 default:
7439 _num_rx_queues = 1;
7440 _num_tx_queues = 1;
7441 break;
7442 }
7443
7444 *num_rx_queues_out = _num_rx_queues;
7445 *num_tx_queues_out = _num_tx_queues;
7446}
7447
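/* picks the IRQ scheme from the int_mode parameter: INTx/MSI run a
 * single Rx/Tx queue pair, MSI-X sizes the queues from bp->multi_mode
 * (see bnx2x_set_int_mode_msix() above) */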
7448static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7449{
ca00392c 7450 int rc = 0;
a2fbb9ea 7451
8badd27a
EG
7452 switch (int_mode) {
7453 case INT_MODE_INTx:
7454 case INT_MODE_MSI:
ca00392c
EG
7455 bp->num_rx_queues = 1;
7456 bp->num_tx_queues = 1;
7457 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7458 break;
7459
7460 case INT_MODE_MSIX:
7461 default:
ca00392c
EG
7462 /* Set interrupt mode according to bp->multi_mode value */
7463 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7464 &bp->num_tx_queues);
7465
7466 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7467 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7468
2dfe0e1f
EG
7469 /* if we can't use MSI-X we only need one fp,
7470 * so try to enable MSI-X with the requested number of fp's
7471 * and fallback to MSI or legacy INTx with one fp
7472 */
ca00392c
EG
7473 rc = bnx2x_enable_msix(bp);
7474 if (rc) {
34f80b04 7475 /* failed to enable MSI-X */
555f6c78
EG
7476 if (bp->multi_mode)
7477 BNX2X_ERR("Multi requested but failed to "
ca00392c
EG
7478 "enable MSI-X (rx %d tx %d), "
7479 "set number of queues to 1\n",
7480 bp->num_rx_queues, bp->num_tx_queues);
7481 bp->num_rx_queues = 1;
7482 bp->num_tx_queues = 1;
a2fbb9ea 7483 }
8badd27a 7484 break;
a2fbb9ea 7485 }
555f6c78 7486 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7487 return rc;
8badd27a
EG
7488}
7489
993ac7b5
MC
7490#ifdef BCM_CNIC
7491static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7492static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7493#endif
8badd27a
EG
7494
7495/* must be called with rtnl_lock */
7496static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7497{
7498 u32 load_code;
ca00392c
EG
7499 int i, rc;
7500
8badd27a 7501#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7502 if (unlikely(bp->panic))
7503 return -EPERM;
7504#endif
7505
7506 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7507
ca00392c 7508 rc = bnx2x_set_int_mode(bp);
c14423fe 7509
a2fbb9ea
ET
7510 if (bnx2x_alloc_mem(bp))
7511 return -ENOMEM;
7512
555f6c78 7513 for_each_rx_queue(bp, i)
7a9b2557
VZ
7514 bnx2x_fp(bp, i, disable_tpa) =
7515 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7516
555f6c78 7517 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7518 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7519 bnx2x_poll, 128);
7520
2dfe0e1f
EG
7521 bnx2x_napi_enable(bp);
7522
34f80b04
EG
7523 if (bp->flags & USING_MSIX_FLAG) {
7524 rc = bnx2x_req_msix_irqs(bp);
7525 if (rc) {
7526 pci_disable_msix(bp->pdev);
2dfe0e1f 7527 goto load_error1;
34f80b04
EG
7528 }
7529 } else {
ca00392c
EG
 7530 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7531 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7532 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7533 bnx2x_enable_msi(bp);
34f80b04
EG
7534 bnx2x_ack_int(bp);
7535 rc = bnx2x_req_irq(bp);
7536 if (rc) {
2dfe0e1f 7537 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7538 if (bp->flags & USING_MSI_FLAG)
7539 pci_disable_msi(bp->pdev);
2dfe0e1f 7540 goto load_error1;
a2fbb9ea 7541 }
8badd27a
EG
7542 if (bp->flags & USING_MSI_FLAG) {
7543 bp->dev->irq = bp->pdev->irq;
7544 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7545 bp->dev->name, bp->pdev->irq);
7546 }
a2fbb9ea
ET
7547 }
7548
2dfe0e1f
EG
7549 /* Send LOAD_REQUEST command to MCP
7550 Returns the type of LOAD command:
 7551 if it is the first port to be initialized,
 7552 common blocks should be initialized, otherwise not
7553 */
7554 if (!BP_NOMCP(bp)) {
7555 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7556 if (!load_code) {
7557 BNX2X_ERR("MCP response failure, aborting\n");
7558 rc = -EBUSY;
7559 goto load_error2;
7560 }
7561 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7562 rc = -EBUSY; /* other port in diagnostic mode */
7563 goto load_error2;
7564 }
7565
7566 } else {
7567 int port = BP_PORT(bp);
7568
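		/* no MCP: emulate its load accounting - load_count[0] is
		 * chip-wide, load_count[1 + port] per port; the first
		 * chip-wide load does COMMON init, the first load on a
		 * port does PORT init, any other load FUNCTION init */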
f5372251 7569 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7570 load_count[0], load_count[1], load_count[2]);
7571 load_count[0]++;
7572 load_count[1 + port]++;
f5372251 7573 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7574 load_count[0], load_count[1], load_count[2]);
7575 if (load_count[0] == 1)
7576 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7577 else if (load_count[1 + port] == 1)
7578 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7579 else
7580 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7581 }
7582
7583 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7584 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7585 bp->port.pmf = 1;
7586 else
7587 bp->port.pmf = 0;
7588 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7589
a2fbb9ea 7590 /* Initialize HW */
34f80b04
EG
7591 rc = bnx2x_init_hw(bp, load_code);
7592 if (rc) {
a2fbb9ea 7593 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7594 goto load_error2;
a2fbb9ea
ET
7595 }
7596
a2fbb9ea 7597 /* Setup NIC internals and enable interrupts */
471de716 7598 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7599
2691d51d
EG
7600 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7601 (bp->common.shmem2_base))
7602 SHMEM2_WR(bp, dcc_support,
7603 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7604 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7605
a2fbb9ea 7606 /* Send LOAD_DONE command to MCP */
34f80b04 7607 if (!BP_NOMCP(bp)) {
228241eb
ET
7608 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7609 if (!load_code) {
da5a662a 7610 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7611 rc = -EBUSY;
2dfe0e1f 7612 goto load_error3;
a2fbb9ea
ET
7613 }
7614 }
7615
7616 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7617
34f80b04
EG
7618 rc = bnx2x_setup_leading(bp);
7619 if (rc) {
da5a662a 7620 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7621#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7622 goto load_error3;
e3553b29
EG
7623#else
7624 bp->panic = 1;
7625 return -EBUSY;
7626#endif
34f80b04 7627 }
a2fbb9ea 7628
34f80b04
EG
7629 if (CHIP_IS_E1H(bp))
7630 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7631 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 7632 bp->flags |= MF_FUNC_DIS;
34f80b04 7633 }
a2fbb9ea 7634
ca00392c 7635 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
7636#ifdef BCM_CNIC
7637 /* Enable Timer scan */
7638 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7639#endif
34f80b04
EG
7640 for_each_nondefault_queue(bp, i) {
7641 rc = bnx2x_setup_multi(bp, i);
7642 if (rc)
37b091ba
MC
7643#ifdef BCM_CNIC
7644 goto load_error4;
7645#else
2dfe0e1f 7646 goto load_error3;
37b091ba 7647#endif
34f80b04 7648 }
a2fbb9ea 7649
ca00392c 7650 if (CHIP_IS_E1(bp))
e665bfda 7651 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7652 else
e665bfda 7653 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
7654#ifdef BCM_CNIC
7655 /* Set iSCSI L2 MAC */
7656 mutex_lock(&bp->cnic_mutex);
7657 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7658 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7659 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7660 }
7661 mutex_unlock(&bp->cnic_mutex);
7662#endif
ca00392c 7663 }
34f80b04
EG
7664
7665 if (bp->port.pmf)
b5bf9068 7666 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7667
7668 /* Start fast path */
34f80b04
EG
7669 switch (load_mode) {
7670 case LOAD_NORMAL:
ca00392c
EG
7671 if (bp->state == BNX2X_STATE_OPEN) {
 7672 /* Tx queues should only be re-enabled */
7673 netif_tx_wake_all_queues(bp->dev);
7674 }
2dfe0e1f 7675 /* Initialize the receive filter. */
34f80b04
EG
7676 bnx2x_set_rx_mode(bp->dev);
7677 break;
7678
7679 case LOAD_OPEN:
555f6c78 7680 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7681 if (bp->state != BNX2X_STATE_OPEN)
7682 netif_tx_disable(bp->dev);
2dfe0e1f 7683 /* Initialize the receive filter. */
34f80b04 7684 bnx2x_set_rx_mode(bp->dev);
34f80b04 7685 break;
a2fbb9ea 7686
34f80b04 7687 case LOAD_DIAG:
2dfe0e1f 7688 /* Initialize the receive filter. */
a2fbb9ea 7689 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7690 bp->state = BNX2X_STATE_DIAG;
7691 break;
7692
7693 default:
7694 break;
a2fbb9ea
ET
7695 }
7696
34f80b04
EG
7697 if (!bp->port.pmf)
7698 bnx2x__link_status_update(bp);
7699
a2fbb9ea
ET
7700 /* start the timer */
7701 mod_timer(&bp->timer, jiffies + bp->current_interval);
7702
993ac7b5
MC
7703#ifdef BCM_CNIC
7704 bnx2x_setup_cnic_irq_info(bp);
7705 if (bp->state == BNX2X_STATE_OPEN)
7706 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7707#endif
34f80b04 7708
a2fbb9ea
ET
7709 return 0;
7710
37b091ba
MC
7711#ifdef BCM_CNIC
7712load_error4:
7713 /* Disable Timer scan */
7714 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7715#endif
2dfe0e1f
EG
7716load_error3:
7717 bnx2x_int_disable_sync(bp, 1);
7718 if (!BP_NOMCP(bp)) {
7719 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7720 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7721 }
7722 bp->port.pmf = 0;
7a9b2557
VZ
7723 /* Free SKBs, SGEs, TPA pool and driver internals */
7724 bnx2x_free_skbs(bp);
555f6c78 7725 for_each_rx_queue(bp, i)
3196a88a 7726 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7727load_error2:
d1014634
YG
7728 /* Release IRQs */
7729 bnx2x_free_irq(bp);
2dfe0e1f
EG
7730load_error1:
7731 bnx2x_napi_disable(bp);
555f6c78 7732 for_each_rx_queue(bp, i)
7cde1c8b 7733 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7734 bnx2x_free_mem(bp);
7735
34f80b04 7736 return rc;
a2fbb9ea
ET
7737}
7738
7739static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7740{
555f6c78 7741 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7742 int rc;
7743
c14423fe 7744 /* halt the connection */
555f6c78
EG
7745 fp->state = BNX2X_FP_STATE_HALTING;
7746 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7747
34f80b04 7748 /* Wait for completion */
a2fbb9ea 7749 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7750 &(fp->state), 1);
c14423fe 7751 if (rc) /* timeout */
a2fbb9ea
ET
7752 return rc;
7753
7754 /* delete cfc entry */
7755 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7756
34f80b04
EG
7757 /* Wait for completion */
7758 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7759 &(fp->state), 1);
34f80b04 7760 return rc;
a2fbb9ea
ET
7761}
7762
da5a662a 7763static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7764{
4781bfad 7765 __le16 dsb_sp_prod_idx;
c14423fe 7766 /* if the other port is handling traffic,
a2fbb9ea 7767 this can take a lot of time */
34f80b04
EG
7768 int cnt = 500;
7769 int rc;
a2fbb9ea
ET
7770
7771 might_sleep();
7772
7773 /* Send HALT ramrod */
7774 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7775 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7776
34f80b04
EG
7777 /* Wait for completion */
7778 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7779 &(bp->fp[0].state), 1);
7780 if (rc) /* timeout */
da5a662a 7781 return rc;
a2fbb9ea 7782
49d66772 7783 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7784
228241eb 7785 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7786 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7787
49d66772 7788 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7789 we are going to reset the chip anyway
7790 so there is not much to do if this times out
7791 */
34f80b04 7792 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7793 if (!cnt) {
7794 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7795 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7796 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7797#ifdef BNX2X_STOP_ON_ERROR
7798 bnx2x_panic();
7799#endif
36e552ab 7800 rc = -EBUSY;
34f80b04
EG
7801 break;
7802 }
7803 cnt--;
da5a662a 7804 msleep(1);
5650d9d4 7805 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7806 }
7807 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7808 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7809
7810 return rc;
a2fbb9ea
ET
7811}
7812
34f80b04
EG
7813static void bnx2x_reset_func(struct bnx2x *bp)
7814{
7815 int port = BP_PORT(bp);
7816 int func = BP_FUNC(bp);
7817 int base, i;
7818
7819 /* Configure IGU */
7820 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7821 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7822
37b091ba
MC
7823#ifdef BCM_CNIC
7824 /* Disable Timer scan */
7825 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7826 /*
 7827 * Wait for at least 10ms and up to 2 seconds for the timers scan to
7828 * complete
7829 */
7830 for (i = 0; i < 200; i++) {
7831 msleep(10);
7832 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7833 break;
7834 }
7835#endif
34f80b04
EG
7836 /* Clear ILT */
7837 base = FUNC_ILT_BASE(func);
7838 for (i = base; i < base + ILT_PER_FUNC; i++)
7839 bnx2x_ilt_wr(bp, i, 0);
7840}
7841
7842static void bnx2x_reset_port(struct bnx2x *bp)
7843{
7844 int port = BP_PORT(bp);
7845 u32 val;
7846
7847 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7848
7849 /* Do not rcv packets to BRB */
7850 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7851 /* Do not direct rcv packets that are not for MCP to the BRB */
7852 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7853 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7854
7855 /* Configure AEU */
7856 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7857
7858 msleep(100);
7859 /* Check for BRB port occupancy */
7860 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7861 if (val)
7862 DP(NETIF_MSG_IFDOWN,
33471629 7863 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7864
7865 /* TODO: Close Doorbell port? */
7866}
7867
34f80b04
EG
7868static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7869{
7870 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7871 BP_FUNC(bp), reset_code);
7872
7873 switch (reset_code) {
7874 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7875 bnx2x_reset_port(bp);
7876 bnx2x_reset_func(bp);
7877 bnx2x_reset_common(bp);
7878 break;
7879
7880 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7881 bnx2x_reset_port(bp);
7882 bnx2x_reset_func(bp);
7883 break;
7884
7885 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7886 bnx2x_reset_func(bp);
7887 break;
49d66772 7888
34f80b04
EG
7889 default:
7890 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7891 break;
7892 }
7893}
7894
33471629 7895/* must be called with rtnl_lock */
34f80b04 7896static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7897{
da5a662a 7898 int port = BP_PORT(bp);
a2fbb9ea 7899 u32 reset_code = 0;
da5a662a 7900 int i, cnt, rc;
a2fbb9ea 7901
993ac7b5
MC
7902#ifdef BCM_CNIC
7903 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7904#endif
a2fbb9ea
ET
7905 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7906
ab6ad5a4 7907 /* Set "drop all" */
228241eb
ET
7908 bp->rx_mode = BNX2X_RX_MODE_NONE;
7909 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7910
ab6ad5a4 7911 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7912 bnx2x_netif_stop(bp, 1);
e94d8af3 7913
34f80b04
EG
7914 del_timer_sync(&bp->timer);
7915 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7916 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7917 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7918
70b9986c
EG
7919 /* Release IRQs */
7920 bnx2x_free_irq(bp);
7921
555f6c78
EG
7922 /* Wait until tx fastpath tasks complete */
7923 for_each_tx_queue(bp, i) {
228241eb
ET
7924 struct bnx2x_fastpath *fp = &bp->fp[i];
7925
34f80b04 7926 cnt = 1000;
e8b5fc51 7927 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7928
7961f791 7929 bnx2x_tx_int(fp);
34f80b04
EG
7930 if (!cnt) {
7931 BNX2X_ERR("timeout waiting for queue[%d]\n",
7932 i);
7933#ifdef BNX2X_STOP_ON_ERROR
7934 bnx2x_panic();
7935 return -EBUSY;
7936#else
7937 break;
7938#endif
7939 }
7940 cnt--;
da5a662a 7941 msleep(1);
34f80b04 7942 }
228241eb 7943 }
da5a662a
VZ
7944 /* Give HW time to discard old tx messages */
7945 msleep(1);
a2fbb9ea 7946
3101c2bc
YG
7947 if (CHIP_IS_E1(bp)) {
7948 struct mac_configuration_cmd *config =
7949 bnx2x_sp(bp, mcast_config);
7950
e665bfda 7951 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7952
8d9c5f34 7953 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7954 CAM_INVALIDATE(config->config_table[i]);
7955
8d9c5f34 7956 config->hdr.length = i;
3101c2bc
YG
7957 if (CHIP_REV_IS_SLOW(bp))
7958 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7959 else
7960 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7961 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7962 config->hdr.reserved1 = 0;
7963
e665bfda
MC
7964 bp->set_mac_pending++;
7965 smp_wmb();
7966
3101c2bc
YG
7967 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7968 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7969 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7970
7971 } else { /* E1H */
65abd74d
YG
7972 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7973
e665bfda 7974 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
7975
7976 for (i = 0; i < MC_HASH_SIZE; i++)
7977 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7978
7979 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7980 }
993ac7b5
MC
7981#ifdef BCM_CNIC
7982 /* Clear iSCSI L2 MAC */
7983 mutex_lock(&bp->cnic_mutex);
7984 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7985 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7986 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7987 }
7988 mutex_unlock(&bp->cnic_mutex);
7989#endif
3101c2bc 7990
65abd74d
YG
7991 if (unload_mode == UNLOAD_NORMAL)
7992 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7993
7d0446c2 7994 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7995 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7996
7d0446c2 7997 else if (bp->wol) {
65abd74d
YG
7998 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7999 u8 *mac_addr = bp->dev->dev_addr;
8000 u32 val;
8001 /* The mac address is written to entries 1-4 to
8002 preserve entry 0 which is used by the PMF */
8003 u8 entry = (BP_E1HVN(bp) + 1)*8;
8004
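			/* the MAC is packed into two 32-bit MATCH registers:
			 * bytes 0-1 into the first, bytes 2-5 into the
			 * second */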
8005 val = (mac_addr[0] << 8) | mac_addr[1];
8006 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8007
8008 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8009 (mac_addr[4] << 8) | mac_addr[5];
8010 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8011
8012 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8013
8014 } else
8015 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8016
34f80b04
EG
8017 /* Close multi and leading connections
8018 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8019 for_each_nondefault_queue(bp, i)
8020 if (bnx2x_stop_multi(bp, i))
228241eb 8021 goto unload_error;
a2fbb9ea 8022
da5a662a
VZ
8023 rc = bnx2x_stop_leading(bp);
8024 if (rc) {
34f80b04 8025 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8026#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8027 return -EBUSY;
da5a662a
VZ
8028#else
8029 goto unload_error;
34f80b04 8030#endif
228241eb
ET
8031 }
8032
8033unload_error:
34f80b04 8034 if (!BP_NOMCP(bp))
228241eb 8035 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8036 else {
f5372251 8037 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8038 load_count[0], load_count[1], load_count[2]);
8039 load_count[0]--;
da5a662a 8040 load_count[1 + port]--;
f5372251 8041 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8042 load_count[0], load_count[1], load_count[2]);
8043 if (load_count[0] == 0)
8044 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8045 else if (load_count[1 + port] == 0)
34f80b04
EG
8046 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8047 else
8048 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8049 }
a2fbb9ea 8050
34f80b04
EG
8051 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8052 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8053 bnx2x__link_reset(bp);
a2fbb9ea
ET
8054
8055 /* Reset the chip */
228241eb 8056 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8057
8058 /* Report UNLOAD_DONE to MCP */
34f80b04 8059 if (!BP_NOMCP(bp))
a2fbb9ea 8060 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8061
9a035440 8062 bp->port.pmf = 0;
a2fbb9ea 8063
7a9b2557 8064 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8065 bnx2x_free_skbs(bp);
555f6c78 8066 for_each_rx_queue(bp, i)
3196a88a 8067 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 8068 for_each_rx_queue(bp, i)
7cde1c8b 8069 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8070 bnx2x_free_mem(bp);
8071
8072 bp->state = BNX2X_STATE_CLOSED;
228241eb 8073
a2fbb9ea
ET
8074 netif_carrier_off(bp->dev);
8075
8076 return 0;
8077}
8078
34f80b04
EG
8079static void bnx2x_reset_task(struct work_struct *work)
8080{
8081 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8082
8083#ifdef BNX2X_STOP_ON_ERROR
8084 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8085 " so reset not done to allow debug dump,\n"
ad361c98 8086 " you will need to reboot when done\n");
34f80b04
EG
8087 return;
8088#endif
8089
8090 rtnl_lock();
8091
8092 if (!netif_running(bp->dev))
8093 goto reset_task_exit;
8094
8095 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8096 bnx2x_nic_load(bp, LOAD_NORMAL);
8097
8098reset_task_exit:
8099 rtnl_unlock();
8100}
8101
a2fbb9ea
ET
8102/* end of nic load/unload */
8103
8104/* ethtool_ops */
8105
8106/*
8107 * Init service functions
8108 */
8109
f1ef27ef
EG
8110static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8111{
8112 switch (func) {
8113 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8114 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8115 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8116 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8117 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8118 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8119 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8120 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8121 default:
8122 BNX2X_ERR("Unsupported function index: %d\n", func);
8123 return (u32)(-1);
8124 }
8125}
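/* writing a function number to its PGL pretend register makes the chip
 * handle subsequent GRC accesses from this PCI function as if they came
 * from that function - used below to temporarily act as function 0 */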
8126
8127static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8128{
8129 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8130
8131 /* Flush all outstanding writes */
8132 mmiowb();
8133
8134 /* Pretend to be function 0 */
8135 REG_WR(bp, reg, 0);
8136 /* Flush the GRC transaction (in the chip) */
8137 new_val = REG_RD(bp, reg);
8138 if (new_val != 0) {
8139 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8140 new_val);
8141 BUG();
8142 }
8143
8144 /* From now we are in the "like-E1" mode */
8145 bnx2x_int_disable(bp);
8146
8147 /* Flush all outstanding writes */
8148 mmiowb();
8149
 8150 /* Restore the original function settings */
8151 REG_WR(bp, reg, orig_func);
8152 new_val = REG_RD(bp, reg);
8153 if (new_val != orig_func) {
8154 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8155 orig_func, new_val);
8156 BUG();
8157 }
8158}
8159
8160static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8161{
8162 if (CHIP_IS_E1H(bp))
8163 bnx2x_undi_int_disable_e1h(bp, func);
8164 else
8165 bnx2x_int_disable(bp);
8166}
8167
34f80b04
EG
8168static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8169{
8170 u32 val;
8171
8172 /* Check if there is any driver already loaded */
8173 val = REG_RD(bp, MISC_REG_UNPREPARED);
8174 if (val == 0x1) {
8175 /* Check if it is the UNDI driver
8176 * UNDI driver initializes CID offset for normal bell to 0x7
8177 */
4a37fb66 8178 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8179 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8180 if (val == 0x7) {
8181 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8182 /* save our func */
34f80b04 8183 int func = BP_FUNC(bp);
da5a662a
VZ
8184 u32 swap_en;
8185 u32 swap_val;
34f80b04 8186
b4661739
EG
8187 /* clear the UNDI indication */
8188 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8189
34f80b04
EG
8190 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8191
8192 /* try unload UNDI on port 0 */
8193 bp->func = 0;
da5a662a
VZ
8194 bp->fw_seq =
8195 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8196 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8197 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8198
8199 /* if UNDI is loaded on the other port */
8200 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8201
da5a662a
VZ
8202 /* send "DONE" for previous unload */
8203 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8204
8205 /* unload UNDI on port 1 */
34f80b04 8206 bp->func = 1;
da5a662a
VZ
8207 bp->fw_seq =
8208 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8209 DRV_MSG_SEQ_NUMBER_MASK);
8210 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8211
8212 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8213 }
8214
b4661739
EG
8215 /* now it's safe to release the lock */
8216 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8217
f1ef27ef 8218 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8219
8220 /* close input traffic and wait for it */
8221 /* Do not rcv packets to BRB */
8222 REG_WR(bp,
8223 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8224 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8225 /* Do not direct rcv packets that are not for MCP to
8226 * the BRB */
8227 REG_WR(bp,
8228 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8229 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8230 /* clear AEU */
8231 REG_WR(bp,
8232 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8233 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8234 msleep(10);
8235
8236 /* save NIG port swap info */
8237 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8238 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8239 /* reset device */
8240 REG_WR(bp,
8241 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8242 0xd3ffffff);
34f80b04
EG
8243 REG_WR(bp,
8244 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8245 0x1403);
da5a662a
VZ
8246 /* take the NIG out of reset and restore swap values */
8247 REG_WR(bp,
8248 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8249 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8250 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8251 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8252
8253 /* send unload done to the MCP */
8254 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8255
8256 /* restore our func and fw_seq */
8257 bp->func = func;
8258 bp->fw_seq =
8259 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8260 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8261
8262 } else
8263 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8264 }
8265}
8266
8267static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8268{
8269 u32 val, val2, val3, val4, id;
72ce58c3 8270 u16 pmc;
34f80b04
EG
8271
8272 /* Get the chip revision id and number. */
8273 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8274 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8275 id = ((val & 0xffff) << 16);
8276 val = REG_RD(bp, MISC_REG_CHIP_REV);
8277 id |= ((val & 0xf) << 12);
8278 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8279 id |= ((val & 0xff) << 4);
5a40e08e 8280 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8281 id |= (val & 0xf);
8282 bp->common.chip_id = id;
8283 bp->link_params.chip_id = bp->common.chip_id;
8284 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8285
1c06328c
EG
8286 val = (REG_RD(bp, 0x2874) & 0x55);
8287 if ((bp->common.chip_id & 0x1) ||
8288 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8289 bp->flags |= ONE_PORT_FLAG;
8290 BNX2X_DEV_INFO("single port device\n");
8291 }
8292
34f80b04
EG
8293 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8294 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8295 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8296 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8297 bp->common.flash_size, bp->common.flash_size);
8298
8299 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8300 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8301 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8302 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8303 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8304
8305 if (!bp->common.shmem_base ||
8306 (bp->common.shmem_base < 0xA0000) ||
8307 (bp->common.shmem_base >= 0xC0000)) {
8308 BNX2X_DEV_INFO("MCP not active\n");
8309 bp->flags |= NO_MCP_FLAG;
8310 return;
8311 }
8312
8313 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8314 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8315 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8316 BNX2X_ERR("BAD MCP validity signature\n");
8317
8318 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8319 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8320
8321 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8322 SHARED_HW_CFG_LED_MODE_MASK) >>
8323 SHARED_HW_CFG_LED_MODE_SHIFT);
8324
c2c8b03e
EG
8325 bp->link_params.feature_config_flags = 0;
8326 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8327 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8328 bp->link_params.feature_config_flags |=
8329 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8330 else
8331 bp->link_params.feature_config_flags &=
8332 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8333
34f80b04
EG
8334 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8335 bp->common.bc_ver = val;
8336 BNX2X_DEV_INFO("bc_ver %X\n", val);
8337 if (val < BNX2X_BC_VER) {
8338 /* for now only warn
8339 * later we might need to enforce this */
8340 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8341 " please upgrade BC\n", BNX2X_BC_VER, val);
8342 }
4d295db0
EG
8343 bp->link_params.feature_config_flags |=
8344 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8345 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8346
8347 if (BP_E1HVN(bp) == 0) {
8348 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8349 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8350 } else {
8351 /* no WOL capability for E1HVN != 0 */
8352 bp->flags |= NO_WOL_FLAG;
8353 }
8354 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8355 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8356
8357 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8358 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8359 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8360 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8361
8362 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8363 val, val2, val3, val4);
8364}
8365
8366static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8367 u32 switch_cfg)
a2fbb9ea 8368{
34f80b04 8369 int port = BP_PORT(bp);
a2fbb9ea
ET
8370 u32 ext_phy_type;
8371
a2fbb9ea
ET
8372 switch (switch_cfg) {
8373 case SWITCH_CFG_1G:
8374 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8375
c18487ee
YR
8376 ext_phy_type =
8377 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8378 switch (ext_phy_type) {
8379 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8380 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8381 ext_phy_type);
8382
34f80b04
EG
8383 bp->port.supported |= (SUPPORTED_10baseT_Half |
8384 SUPPORTED_10baseT_Full |
8385 SUPPORTED_100baseT_Half |
8386 SUPPORTED_100baseT_Full |
8387 SUPPORTED_1000baseT_Full |
8388 SUPPORTED_2500baseX_Full |
8389 SUPPORTED_TP |
8390 SUPPORTED_FIBRE |
8391 SUPPORTED_Autoneg |
8392 SUPPORTED_Pause |
8393 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8394 break;
8395
8396 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8397 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8398 ext_phy_type);
8399
34f80b04
EG
8400 bp->port.supported |= (SUPPORTED_10baseT_Half |
8401 SUPPORTED_10baseT_Full |
8402 SUPPORTED_100baseT_Half |
8403 SUPPORTED_100baseT_Full |
8404 SUPPORTED_1000baseT_Full |
8405 SUPPORTED_TP |
8406 SUPPORTED_FIBRE |
8407 SUPPORTED_Autoneg |
8408 SUPPORTED_Pause |
8409 SUPPORTED_Asym_Pause);
8410 break;
8411
8412 default:
8413 BNX2X_ERR("NVRAM config error. "
8414 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8415 bp->link_params.ext_phy_config);
8416 return;
8417 }
8418
8419 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8420 port*0x10);
8421 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8422 break;
8423
8424 case SWITCH_CFG_10G:
8425 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8426
8427 ext_phy_type =
8428 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8429 switch (ext_phy_type) {
8430 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8431 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8432 ext_phy_type);
8433
8434 bp->port.supported |= (SUPPORTED_10baseT_Half |
8435 SUPPORTED_10baseT_Full |
8436 SUPPORTED_100baseT_Half |
8437 SUPPORTED_100baseT_Full |
8438 SUPPORTED_1000baseT_Full |
8439 SUPPORTED_2500baseX_Full |
8440 SUPPORTED_10000baseT_Full |
8441 SUPPORTED_TP |
8442 SUPPORTED_FIBRE |
8443 SUPPORTED_Autoneg |
8444 SUPPORTED_Pause |
8445 SUPPORTED_Asym_Pause);
8446 break;
8447
8448 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8449 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8450 ext_phy_type);
f1410647 8451
34f80b04 8452 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8453 SUPPORTED_1000baseT_Full |
34f80b04 8454 SUPPORTED_FIBRE |
589abe3a 8455 SUPPORTED_Autoneg |
8456 SUPPORTED_Pause |
8457 SUPPORTED_Asym_Pause);
8458 break;
8459
8460 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8461 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8462 ext_phy_type);
8463
34f80b04 8464 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8465 SUPPORTED_2500baseX_Full |
34f80b04 8466 SUPPORTED_1000baseT_Full |
8467 SUPPORTED_FIBRE |
8468 SUPPORTED_Autoneg |
8469 SUPPORTED_Pause |
8470 SUPPORTED_Asym_Pause);
8471 break;
8472
8473 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8474 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8475 ext_phy_type);
8476
8477 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8478 SUPPORTED_FIBRE |
8479 SUPPORTED_Pause |
8480 SUPPORTED_Asym_Pause);
8481 break;
8482
8483 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8484 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8485 ext_phy_type);
8486
8487 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8488 SUPPORTED_1000baseT_Full |
8489 SUPPORTED_FIBRE |
8490 SUPPORTED_Pause |
8491 SUPPORTED_Asym_Pause);
8492 break;
8493
8494 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8495 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8496 ext_phy_type);
8497
34f80b04 8498 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8499 SUPPORTED_1000baseT_Full |
34f80b04 8500 SUPPORTED_Autoneg |
589abe3a 8501 SUPPORTED_FIBRE |
8502 SUPPORTED_Pause |
8503 SUPPORTED_Asym_Pause);
8504 break;
8505
8506 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8507 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8508 ext_phy_type);
8509
8510 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8511 SUPPORTED_1000baseT_Full |
8512 SUPPORTED_Autoneg |
8513 SUPPORTED_FIBRE |
8514 SUPPORTED_Pause |
8515 SUPPORTED_Asym_Pause);
8516 break;
8517
8518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8519 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8520 ext_phy_type);
8521
8522 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8523 SUPPORTED_TP |
8524 SUPPORTED_Autoneg |
8525 SUPPORTED_Pause |
8526 SUPPORTED_Asym_Pause);
8527 break;
8528
8529 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8530 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8531 ext_phy_type);
8532
8533 bp->port.supported |= (SUPPORTED_10baseT_Half |
8534 SUPPORTED_10baseT_Full |
8535 SUPPORTED_100baseT_Half |
8536 SUPPORTED_100baseT_Full |
8537 SUPPORTED_1000baseT_Full |
8538 SUPPORTED_10000baseT_Full |
8539 SUPPORTED_TP |
8540 SUPPORTED_Autoneg |
8541 SUPPORTED_Pause |
8542 SUPPORTED_Asym_Pause);
8543 break;
8544
8545 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8546 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8547 bp->link_params.ext_phy_config);
8548 break;
8549
8550 default:
8551 BNX2X_ERR("NVRAM config error. "
8552 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8553 bp->link_params.ext_phy_config);
8554 return;
8555 }
8556
8557 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8558 port*0x18);
8559 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8560
8561 break;
8562
8563 default:
8564 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8565 bp->port.link_config);
8566 return;
8567 }
34f80b04 8568 bp->link_params.phy_addr = bp->port.phy_addr;
8569
8570 /* mask what we support according to speed_cap_mask */
8571 if (!(bp->link_params.speed_cap_mask &
8572 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8573 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8574
8575 if (!(bp->link_params.speed_cap_mask &
8576 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8577 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8578
8579 if (!(bp->link_params.speed_cap_mask &
8580 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8581 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8582
8583 if (!(bp->link_params.speed_cap_mask &
8584 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8585 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8586
8587 if (!(bp->link_params.speed_cap_mask &
8588 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8589 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8590 SUPPORTED_1000baseT_Full);
a2fbb9ea 8591
8592 if (!(bp->link_params.speed_cap_mask &
8593 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8594 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8595
8596 if (!(bp->link_params.speed_cap_mask &
8597 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8598 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8599
34f80b04 8600 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8601}
8602
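/* Translate the NVRAM link_config selector into the requested line
 * speed, duplex and advertised modes.  A forced speed the port cannot
 * do is reported as an NVRAM config error and the function returns
 * early; an unknown selector falls back to autoneg, advertising
 * everything in bp->port.supported.
 */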
34f80b04 8603static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8604{
c18487ee 8605 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8606
34f80b04 8607 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8608 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8609 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8610 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8611 bp->port.advertising = bp->port.supported;
a2fbb9ea 8612 } else {
8613 u32 ext_phy_type =
8614 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8615
8616 if ((ext_phy_type ==
8617 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8618 (ext_phy_type ==
8619 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8620 /* force 10G, no AN */
c18487ee 8621 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8622 bp->port.advertising =
8623 (ADVERTISED_10000baseT_Full |
8624 ADVERTISED_FIBRE);
8625 break;
8626 }
8627 BNX2X_ERR("NVRAM config error. "
8628 "Invalid link_config 0x%x"
8629 " Autoneg not supported\n",
34f80b04 8630 bp->port.link_config);
8631 return;
8632 }
8633 break;
8634
8635 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8636 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8637 bp->link_params.req_line_speed = SPEED_10;
8638 bp->port.advertising = (ADVERTISED_10baseT_Full |
8639 ADVERTISED_TP);
8640 } else {
8641 BNX2X_ERR("NVRAM config error. "
8642 "Invalid link_config 0x%x"
8643 " speed_cap_mask 0x%x\n",
34f80b04 8644 bp->port.link_config,
c18487ee 8645 bp->link_params.speed_cap_mask);
8646 return;
8647 }
8648 break;
8649
8650 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8651 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8652 bp->link_params.req_line_speed = SPEED_10;
8653 bp->link_params.req_duplex = DUPLEX_HALF;
8654 bp->port.advertising = (ADVERTISED_10baseT_Half |
8655 ADVERTISED_TP);
8656 } else {
8657 BNX2X_ERR("NVRAM config error. "
8658 "Invalid link_config 0x%x"
8659 " speed_cap_mask 0x%x\n",
34f80b04 8660 bp->port.link_config,
c18487ee 8661 bp->link_params.speed_cap_mask);
8662 return;
8663 }
8664 break;
8665
8666 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8667 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8668 bp->link_params.req_line_speed = SPEED_100;
8669 bp->port.advertising = (ADVERTISED_100baseT_Full |
8670 ADVERTISED_TP);
8671 } else {
8672 BNX2X_ERR("NVRAM config error. "
8673 "Invalid link_config 0x%x"
8674 " speed_cap_mask 0x%x\n",
34f80b04 8675 bp->port.link_config,
c18487ee 8676 bp->link_params.speed_cap_mask);
8677 return;
8678 }
8679 break;
8680
8681 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8682 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8683 bp->link_params.req_line_speed = SPEED_100;
8684 bp->link_params.req_duplex = DUPLEX_HALF;
8685 bp->port.advertising = (ADVERTISED_100baseT_Half |
8686 ADVERTISED_TP);
8687 } else {
8688 BNX2X_ERR("NVRAM config error. "
8689 "Invalid link_config 0x%x"
8690 " speed_cap_mask 0x%x\n",
34f80b04 8691 bp->port.link_config,
c18487ee 8692 bp->link_params.speed_cap_mask);
8693 return;
8694 }
8695 break;
8696
8697 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8698 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8699 bp->link_params.req_line_speed = SPEED_1000;
8700 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8701 ADVERTISED_TP);
8702 } else {
8703 BNX2X_ERR("NVRAM config error. "
8704 "Invalid link_config 0x%x"
8705 " speed_cap_mask 0x%x\n",
34f80b04 8706 bp->port.link_config,
c18487ee 8707 bp->link_params.speed_cap_mask);
8708 return;
8709 }
8710 break;
8711
8712 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8713 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8714 bp->link_params.req_line_speed = SPEED_2500;
8715 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8716 ADVERTISED_TP);
8717 } else {
8718 BNX2X_ERR("NVRAM config error. "
8719 "Invalid link_config 0x%x"
8720 " speed_cap_mask 0x%x\n",
34f80b04 8721 bp->port.link_config,
c18487ee 8722 bp->link_params.speed_cap_mask);
8723 return;
8724 }
8725 break;
8726
8727 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8728 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8729 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8730 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8731 bp->link_params.req_line_speed = SPEED_10000;
8732 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8733 ADVERTISED_FIBRE);
8734 } else {
8735 BNX2X_ERR("NVRAM config error. "
8736 "Invalid link_config 0x%x"
8737 " speed_cap_mask 0x%x\n",
34f80b04 8738 bp->port.link_config,
c18487ee 8739 bp->link_params.speed_cap_mask);
8740 return;
8741 }
8742 break;
8743
8744 default:
8745 BNX2X_ERR("NVRAM config error. "
8746 "BAD link speed link_config 0x%x\n",
34f80b04 8747 bp->port.link_config);
c18487ee 8748 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8749 bp->port.advertising = bp->port.supported;
8750 break;
8751 }
a2fbb9ea 8752
8753 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8754 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8755 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8756 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8757 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8758
c18487ee 8759 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8760 " advertising 0x%x\n",
8761 bp->link_params.req_line_speed,
8762 bp->link_params.req_duplex,
34f80b04 8763 bp->link_params.req_flow_ctrl, bp->port.advertising);
8764}
8765
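/* Pack a MAC address read from shared memory as two CPU-order words
 * into the canonical six-byte wire order.  Illustrative values:
 * mac_hi = 0x0010 and mac_lo = 0x18010203 give
 * mac_buf[] = { 0x00, 0x10, 0x18, 0x01, 0x02, 0x03 }.
 */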
8766static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8767{
8768 mac_hi = cpu_to_be16(mac_hi);
8769 mac_lo = cpu_to_be32(mac_lo);
8770 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8771 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8772}
8773
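/* Pull the per-port hardware configuration out of shared memory:
 * lane and external PHY config (with the BCM8727_NOC alias folded
 * back to BCM8727), speed capability mask, link_config, the four-lane
 * XGXS rx/tx settings, the WoL default and the port MAC address(es).
 * Everything here feeds bp->link_params for the link code.
 */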
34f80b04 8774static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8775{
8776 int port = BP_PORT(bp);
8777 u32 val, val2;
589abe3a 8778 u32 config;
c2c8b03e 8779 u16 i;
01cd4528 8780 u32 ext_phy_type;
a2fbb9ea 8781
c18487ee 8782 bp->link_params.bp = bp;
34f80b04 8783 bp->link_params.port = port;
c18487ee 8784
c18487ee 8785 bp->link_params.lane_config =
a2fbb9ea 8786 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8787 bp->link_params.ext_phy_config =
8788 SHMEM_RD(bp,
8789 dev_info.port_hw_config[port].external_phy_config);
8790	/* BCM8727_NOC => BCM8727, no over-current */
8791 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8792 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8793 bp->link_params.ext_phy_config &=
8794 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8795 bp->link_params.ext_phy_config |=
8796 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8797 bp->link_params.feature_config_flags |=
8798 FEATURE_CONFIG_BCM8727_NOC;
8799 }
8800
c18487ee 8801 bp->link_params.speed_cap_mask =
8802 SHMEM_RD(bp,
8803 dev_info.port_hw_config[port].speed_capability_mask);
8804
34f80b04 8805 bp->port.link_config =
8806 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8807
8808 /* Get the 4 lanes xgxs config rx and tx */
8809 for (i = 0; i < 2; i++) {
8810 val = SHMEM_RD(bp,
8811 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8812 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8813 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8814
8815 val = SHMEM_RD(bp,
8816 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8817 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8818 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8819 }
8820
8821 /* If the device is capable of WoL, set the default state according
8822 * to the HW
8823 */
4d295db0 8824 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8825 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8826 (config & PORT_FEATURE_WOL_ENABLED));
8827
8828 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8829 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8830 bp->link_params.lane_config,
8831 bp->link_params.ext_phy_config,
34f80b04 8832 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8833
8834 bp->link_params.switch_cfg |= (bp->port.link_config &
8835 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8836 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8837
8838 bnx2x_link_settings_requested(bp);
8839
8840 /*
8841	 * If connected directly, work with the internal PHY; otherwise, work
8842	 * with the external PHY
8843 */
8844 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8845 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8846 bp->mdio.prtad = bp->link_params.phy_addr;
8847
8848 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8849 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8850 bp->mdio.prtad =
659bc5c4 8851 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8852
8853 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8854 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8855 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8856 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8857 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8858
8859#ifdef BCM_CNIC
8860 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8861 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8862 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8863#endif
8864}
8865
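/* Gather per-function configuration.  On E1H the e1hov_tag of
 * function 0 decides single- vs. multi-function mode; in E1HMF every
 * function must then carry a valid outer-VLAN tag of its own (else
 * -EPERM) and may take its MAC address from mf_cfg instead of the
 * port's.
 */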
8866static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8867{
8868 int func = BP_FUNC(bp);
8869 u32 val, val2;
8870 int rc = 0;
a2fbb9ea 8871
34f80b04 8872 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8873
8874 bp->e1hov = 0;
8875 bp->e1hmf = 0;
8876 if (CHIP_IS_E1H(bp)) {
8877 bp->mf_config =
8878 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8879
2691d51d 8880 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8881 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8882 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8883 bp->e1hmf = 1;
8884 BNX2X_DEV_INFO("%s function mode\n",
8885 IS_E1HMF(bp) ? "multi" : "single");
8886
8887 if (IS_E1HMF(bp)) {
8888 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8889 e1hov_tag) &
8890 FUNC_MF_CFG_E1HOV_TAG_MASK);
8891 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8892 bp->e1hov = val;
8893 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8894 "(0x%04x)\n",
8895 func, bp->e1hov, bp->e1hov);
8896 } else {
8897 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8898 " aborting\n", func);
8899 rc = -EPERM;
8900 }
8901 } else {
8902 if (BP_E1HVN(bp)) {
8903 BNX2X_ERR("!!! VN %d in single function mode,"
8904 " aborting\n", BP_E1HVN(bp));
8905 rc = -EPERM;
8906 }
8907 }
8908 }
a2fbb9ea 8909
8910 if (!BP_NOMCP(bp)) {
8911 bnx2x_get_port_hwinfo(bp);
8912
8913 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8914 DRV_MSG_SEQ_NUMBER_MASK);
8915 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8916 }
8917
8918 if (IS_E1HMF(bp)) {
8919 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8920 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8921 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8922 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8923 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8924 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8925 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8926 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8927 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8928 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8929 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8930 ETH_ALEN);
8931 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8932 ETH_ALEN);
a2fbb9ea 8933 }
8934
8935 return rc;
8936 }
8937
8938 if (BP_NOMCP(bp)) {
8939 /* only supposed to happen on emulation/FPGA */
33471629 8940		BNX2X_ERR("warning: random MAC workaround active\n");
8941 random_ether_addr(bp->dev->dev_addr);
8942 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8943 }
a2fbb9ea 8944
8945 return rc;
8946}
8947
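/* One-time driver-state initialization: interrupts stay masked via
 * intr_sem until the HW is brought up, locks and work items are set
 * up, NVRAM/shmem configuration is read, and the module parameters
 * (multi_mode, TPA, coalescing defaults, poll timer) are folded into
 * bp.
 */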
8948static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8949{
8950 int func = BP_FUNC(bp);
87942b46 8951 int timer_interval;
8952 int rc;
8953
8954 /* Disable interrupt handling until HW is initialized */
8955 atomic_set(&bp->intr_sem, 1);
e1510706 8956 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8957
34f80b04 8958 mutex_init(&bp->port.phy_mutex);
8959#ifdef BCM_CNIC
8960 mutex_init(&bp->cnic_mutex);
8961#endif
a2fbb9ea 8962
1cf167f2 8963 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8964 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8965
8966 rc = bnx2x_get_hwinfo(bp);
8967
8968 /* need to reset chip if undi was active */
8969 if (!BP_NOMCP(bp))
8970 bnx2x_undi_unload(bp);
8971
8972 if (CHIP_REV_IS_FPGA(bp))
8973 printk(KERN_ERR PFX "FPGA detected\n");
8974
8975 if (BP_NOMCP(bp) && (func == 0))
8976 printk(KERN_ERR PFX
8977 "MCP disabled, must load devices in order!\n");
8978
555f6c78 8979 /* Set multi queue mode */
8980 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8981 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8982 printk(KERN_ERR PFX
8badd27a 8983 "Multi disabled since int_mode requested is not MSI-X\n");
8984 multi_mode = ETH_RSS_MODE_DISABLED;
8985 }
8986 bp->multi_mode = multi_mode;
8987
8989 /* Set TPA flags */
8990 if (disable_tpa) {
8991 bp->flags &= ~TPA_ENABLE_FLAG;
8992 bp->dev->features &= ~NETIF_F_LRO;
8993 } else {
8994 bp->flags |= TPA_ENABLE_FLAG;
8995 bp->dev->features |= NETIF_F_LRO;
8996 }
8997
8998 if (CHIP_IS_E1(bp))
8999 bp->dropless_fc = 0;
9000 else
9001 bp->dropless_fc = dropless_fc;
9002
8d5726c4 9003 bp->mrrs = mrrs;
7a9b2557 9004
9005 bp->tx_ring_size = MAX_TX_AVAIL;
9006 bp->rx_ring_size = MAX_RX_AVAIL;
9007
9008 bp->rx_csum = 1;
9009
9010 bp->tx_ticks = 50;
9011 bp->rx_ticks = 25;
9012
9013 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9014 bp->current_interval = (poll ? poll : timer_interval);
9015
9016 init_timer(&bp->timer);
9017 bp->timer.expires = jiffies + bp->current_interval;
9018 bp->timer.data = (unsigned long) bp;
9019 bp->timer.function = bnx2x_timer;
9020
9021 return rc;
9022}
9023
9024/*
9025 * ethtool service functions
9026 */
9027
9028/* All ethtool functions called with rtnl_lock */
9029
9030static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9031{
9032 struct bnx2x *bp = netdev_priv(dev);
9033
9034 cmd->supported = bp->port.supported;
9035 cmd->advertising = bp->port.advertising;
a2fbb9ea 9036
9037 if ((bp->state == BNX2X_STATE_OPEN) &&
9038 !(bp->flags & MF_FUNC_DIS) &&
9039 (bp->link_vars.link_up)) {
9040 cmd->speed = bp->link_vars.line_speed;
9041 cmd->duplex = bp->link_vars.duplex;
9042 if (IS_E1HMF(bp)) {
9043 u16 vn_max_rate;
34f80b04 9044
9045 vn_max_rate =
9046 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9047 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9048 if (vn_max_rate < cmd->speed)
9049 cmd->speed = vn_max_rate;
9050 }
9051 } else {
9052 cmd->speed = -1;
9053 cmd->duplex = -1;
34f80b04 9054 }
a2fbb9ea 9055
9056 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9057 u32 ext_phy_type =
9058 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9059
9060 switch (ext_phy_type) {
9061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9062 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9063 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9064 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9066 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9068 cmd->port = PORT_FIBRE;
9069 break;
9070
9071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9073 cmd->port = PORT_TP;
9074 break;
9075
9076 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9077 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9078 bp->link_params.ext_phy_config);
9079 break;
9080
9081 default:
9082 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9083 bp->link_params.ext_phy_config);
9084 break;
9085 }
9086 } else
a2fbb9ea 9087 cmd->port = PORT_TP;
a2fbb9ea 9088
01cd4528 9089 cmd->phy_address = bp->mdio.prtad;
9090 cmd->transceiver = XCVR_INTERNAL;
9091
c18487ee 9092 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9093 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9094 else
a2fbb9ea 9095 cmd->autoneg = AUTONEG_DISABLE;
9096
9097 cmd->maxtxpkt = 0;
9098 cmd->maxrxpkt = 0;
9099
9100 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9101 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9102 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9103 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9104 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9105 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9106 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9107
9108 return 0;
9109}
9110
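/* ethtool .set_settings: a no-op in E1H multi-function mode (link
 * parameters are shared by all functions on the port); otherwise the
 * autoneg or forced-speed request is validated against
 * bp->port.supported before link_params is updated and, if the
 * interface is running, the link is restarted.
 */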
9111static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9112{
9113 struct bnx2x *bp = netdev_priv(dev);
9114 u32 advertising;
9115
9116 if (IS_E1HMF(bp))
9117 return 0;
9118
9119 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9120 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9121 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9122 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9123 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9124 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9125 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9126
a2fbb9ea 9127 if (cmd->autoneg == AUTONEG_ENABLE) {
9128 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9129 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9130 return -EINVAL;
f1410647 9131 }
9132
9133 /* advertise the requested speed and duplex if supported */
34f80b04 9134 cmd->advertising &= bp->port.supported;
a2fbb9ea 9135
9136 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9137 bp->link_params.req_duplex = DUPLEX_FULL;
9138 bp->port.advertising |= (ADVERTISED_Autoneg |
9139 cmd->advertising);
9140
9141 } else { /* forced speed */
9142 /* advertise the requested speed and duplex if supported */
9143 switch (cmd->speed) {
9144 case SPEED_10:
9145 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9146 if (!(bp->port.supported &
9147 SUPPORTED_10baseT_Full)) {
9148 DP(NETIF_MSG_LINK,
9149 "10M full not supported\n");
a2fbb9ea 9150 return -EINVAL;
f1410647 9151 }
9152
9153 advertising = (ADVERTISED_10baseT_Full |
9154 ADVERTISED_TP);
9155 } else {
34f80b04 9156 if (!(bp->port.supported &
9157 SUPPORTED_10baseT_Half)) {
9158 DP(NETIF_MSG_LINK,
9159 "10M half not supported\n");
a2fbb9ea 9160 return -EINVAL;
f1410647 9161 }
9162
9163 advertising = (ADVERTISED_10baseT_Half |
9164 ADVERTISED_TP);
9165 }
9166 break;
9167
9168 case SPEED_100:
9169 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9170 if (!(bp->port.supported &
9171 SUPPORTED_100baseT_Full)) {
9172 DP(NETIF_MSG_LINK,
9173 "100M full not supported\n");
a2fbb9ea 9174 return -EINVAL;
f1410647 9175 }
9176
9177 advertising = (ADVERTISED_100baseT_Full |
9178 ADVERTISED_TP);
9179 } else {
34f80b04 9180 if (!(bp->port.supported &
9181 SUPPORTED_100baseT_Half)) {
9182 DP(NETIF_MSG_LINK,
9183 "100M half not supported\n");
a2fbb9ea 9184 return -EINVAL;
f1410647 9185 }
9186
9187 advertising = (ADVERTISED_100baseT_Half |
9188 ADVERTISED_TP);
9189 }
9190 break;
9191
9192 case SPEED_1000:
9193 if (cmd->duplex != DUPLEX_FULL) {
9194 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9195 return -EINVAL;
f1410647 9196 }
a2fbb9ea 9197
34f80b04 9198 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9199 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9200 return -EINVAL;
f1410647 9201 }
9202
9203 advertising = (ADVERTISED_1000baseT_Full |
9204 ADVERTISED_TP);
9205 break;
9206
9207 case SPEED_2500:
9208 if (cmd->duplex != DUPLEX_FULL) {
9209 DP(NETIF_MSG_LINK,
9210 "2.5G half not supported\n");
a2fbb9ea 9211 return -EINVAL;
f1410647 9212 }
a2fbb9ea 9213
34f80b04 9214 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9215 DP(NETIF_MSG_LINK,
9216 "2.5G full not supported\n");
a2fbb9ea 9217 return -EINVAL;
f1410647 9218 }
a2fbb9ea 9219
f1410647 9220 advertising = (ADVERTISED_2500baseX_Full |
9221 ADVERTISED_TP);
9222 break;
9223
9224 case SPEED_10000:
9225 if (cmd->duplex != DUPLEX_FULL) {
9226 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9227 return -EINVAL;
f1410647 9228 }
a2fbb9ea 9229
34f80b04 9230 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9231 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9232 return -EINVAL;
f1410647 9233 }
9234
9235 advertising = (ADVERTISED_10000baseT_Full |
9236 ADVERTISED_FIBRE);
9237 break;
9238
9239 default:
f1410647 9240 DP(NETIF_MSG_LINK, "Unsupported speed\n");
9241 return -EINVAL;
9242 }
9243
9244 bp->link_params.req_line_speed = cmd->speed;
9245 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9246 bp->port.advertising = advertising;
9247 }
9248
c18487ee 9249 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9250 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9251 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9252 bp->port.advertising);
a2fbb9ea 9253
34f80b04 9254 if (netif_running(dev)) {
bb2a0f7a 9255 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9256 bnx2x_link_set(bp);
9257 }
9258
9259 return 0;
9260}
9261
9262#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9263#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9264
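/* Size of an ethtool register dump: count the dwords of every block
 * that is on-line for the running chip (E1 vs. E1H); wide-register
 * sets contribute size * (1 + read_regs_count) dwords.  The total is
 * converted to bytes and the dump_hdr written by bnx2x_get_regs() is
 * added.
 */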
9265static int bnx2x_get_regs_len(struct net_device *dev)
9266{
0a64ea57 9267 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9268 int regdump_len = 0;
9269 int i;
9270
9271 if (CHIP_IS_E1(bp)) {
9272 for (i = 0; i < REGS_COUNT; i++)
9273 if (IS_E1_ONLINE(reg_addrs[i].info))
9274 regdump_len += reg_addrs[i].size;
9275
9276 for (i = 0; i < WREGS_COUNT_E1; i++)
9277 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9278 regdump_len += wreg_addrs_e1[i].size *
9279 (1 + wreg_addrs_e1[i].read_regs_count);
9280
9281 } else { /* E1H */
9282 for (i = 0; i < REGS_COUNT; i++)
9283 if (IS_E1H_ONLINE(reg_addrs[i].info))
9284 regdump_len += reg_addrs[i].size;
9285
9286 for (i = 0; i < WREGS_COUNT_E1H; i++)
9287 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9288 regdump_len += wreg_addrs_e1h[i].size *
9289 (1 + wreg_addrs_e1h[i].read_regs_count);
9290 }
9291 regdump_len *= 4;
9292 regdump_len += sizeof(struct dump_hdr);
9293
9294 return regdump_len;
9295}
9296
9297static void bnx2x_get_regs(struct net_device *dev,
9298 struct ethtool_regs *regs, void *_p)
9299{
9300 u32 *p = _p, i, j;
9301 struct bnx2x *bp = netdev_priv(dev);
9302 struct dump_hdr dump_hdr = {0};
9303
9304 regs->version = 0;
9305 memset(p, 0, regs->len);
9306
9307 if (!netif_running(bp->dev))
9308 return;
9309
9310 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9311 dump_hdr.dump_sign = dump_sign_all;
9312 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9313 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9314 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9315 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9316 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9317
9318 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9319 p += dump_hdr.hdr_size + 1;
9320
9321 if (CHIP_IS_E1(bp)) {
9322 for (i = 0; i < REGS_COUNT; i++)
9323 if (IS_E1_ONLINE(reg_addrs[i].info))
9324 for (j = 0; j < reg_addrs[i].size; j++)
9325 *p++ = REG_RD(bp,
9326 reg_addrs[i].addr + j*4);
9327
9328 } else { /* E1H */
9329 for (i = 0; i < REGS_COUNT; i++)
9330 if (IS_E1H_ONLINE(reg_addrs[i].info))
9331 for (j = 0; j < reg_addrs[i].size; j++)
9332 *p++ = REG_RD(bp,
9333 reg_addrs[i].addr + j*4);
9334 }
9335}
9336
9337#define PHY_FW_VER_LEN 10
9338
9339static void bnx2x_get_drvinfo(struct net_device *dev,
9340 struct ethtool_drvinfo *info)
9341{
9342 struct bnx2x *bp = netdev_priv(dev);
9343 u8 phy_fw_ver[PHY_FW_VER_LEN];
9344
9345 strcpy(info->driver, DRV_MODULE_NAME);
9346 strcpy(info->version, DRV_MODULE_VERSION);
9347
9348 phy_fw_ver[0] = '\0';
9349 if (bp->port.pmf) {
9350 bnx2x_acquire_phy_lock(bp);
9351 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9352 (bp->state != BNX2X_STATE_CLOSED),
9353 phy_fw_ver, PHY_FW_VER_LEN);
9354 bnx2x_release_phy_lock(bp);
9355 }
9356
9357 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9358 (bp->common.bc_ver & 0xff0000) >> 16,
9359 (bp->common.bc_ver & 0xff00) >> 8,
9360 (bp->common.bc_ver & 0xff),
9361 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9362 strcpy(info->bus_info, pci_name(bp->pdev));
9363 info->n_stats = BNX2X_NUM_STATS;
9364 info->testinfo_len = BNX2X_NUM_TESTS;
9365 info->eedump_len = bp->common.flash_size;
9366 info->regdump_len = bnx2x_get_regs_len(dev);
9367}
9368
9369static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9370{
9371 struct bnx2x *bp = netdev_priv(dev);
9372
9373 if (bp->flags & NO_WOL_FLAG) {
9374 wol->supported = 0;
9375 wol->wolopts = 0;
9376 } else {
9377 wol->supported = WAKE_MAGIC;
9378 if (bp->wol)
9379 wol->wolopts = WAKE_MAGIC;
9380 else
9381 wol->wolopts = 0;
9382 }
9383 memset(&wol->sopass, 0, sizeof(wol->sopass));
9384}
9385
9386static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9387{
9388 struct bnx2x *bp = netdev_priv(dev);
9389
9390 if (wol->wolopts & ~WAKE_MAGIC)
9391 return -EINVAL;
9392
9393 if (wol->wolopts & WAKE_MAGIC) {
9394 if (bp->flags & NO_WOL_FLAG)
9395 return -EINVAL;
9396
9397 bp->wol = 1;
34f80b04 9398 } else
a2fbb9ea 9399 bp->wol = 0;
34f80b04 9400
9401 return 0;
9402}
9403
9404static u32 bnx2x_get_msglevel(struct net_device *dev)
9405{
9406 struct bnx2x *bp = netdev_priv(dev);
9407
9408 return bp->msglevel;
9409}
9410
9411static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9412{
9413 struct bnx2x *bp = netdev_priv(dev);
9414
9415 if (capable(CAP_NET_ADMIN))
9416 bp->msglevel = level;
9417}
9418
9419static int bnx2x_nway_reset(struct net_device *dev)
9420{
9421 struct bnx2x *bp = netdev_priv(dev);
9422
9423 if (!bp->port.pmf)
9424 return 0;
a2fbb9ea 9425
34f80b04 9426 if (netif_running(dev)) {
bb2a0f7a 9427 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9428 bnx2x_link_set(bp);
9429 }
9430
9431 return 0;
9432}
9433
ab6ad5a4 9434static u32 bnx2x_get_link(struct net_device *dev)
9435{
9436 struct bnx2x *bp = netdev_priv(dev);
9437
9438 if (bp->flags & MF_FUNC_DIS)
9439 return 0;
9440
9441 return bp->link_vars.link_up;
9442}
9443
9444static int bnx2x_get_eeprom_len(struct net_device *dev)
9445{
9446 struct bnx2x *bp = netdev_priv(dev);
9447
34f80b04 9448 return bp->common.flash_size;
9449}
9450
9451static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9452{
34f80b04 9453 int port = BP_PORT(bp);
9454 int count, i;
9455 u32 val = 0;
9456
9457 /* adjust timeout for emulation/FPGA */
9458 count = NVRAM_TIMEOUT_COUNT;
9459 if (CHIP_REV_IS_SLOW(bp))
9460 count *= 100;
9461
9462 /* request access to nvram interface */
9463 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9464 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9465
9466 for (i = 0; i < count*10; i++) {
9467 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9468 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9469 break;
9470
9471 udelay(5);
9472 }
9473
9474 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9475 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9476 return -EBUSY;
9477 }
9478
9479 return 0;
9480}
9481
9482static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9483{
34f80b04 9484 int port = BP_PORT(bp);
9485 int count, i;
9486 u32 val = 0;
9487
9488 /* adjust timeout for emulation/FPGA */
9489 count = NVRAM_TIMEOUT_COUNT;
9490 if (CHIP_REV_IS_SLOW(bp))
9491 count *= 100;
9492
9493 /* relinquish nvram interface */
9494 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9495 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9496
9497 for (i = 0; i < count*10; i++) {
9498 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9499 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9500 break;
9501
9502 udelay(5);
9503 }
9504
9505 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9506 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9507 return -EBUSY;
9508 }
9509
9510 return 0;
9511}
9512
9513static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9514{
9515 u32 val;
9516
9517 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9518
9519 /* enable both bits, even on read */
9520 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9521 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9522 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9523}
9524
9525static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9526{
9527 u32 val;
9528
9529 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9530
9531 /* disable both bits, even after read */
9532 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9533 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9534 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9535}
9536
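/* Read one dword over the MCP NVRAM interface: build a command word
 * with DOIT set, clear the stale DONE bit, program the byte address,
 * kick the command, then poll (5 us a tick, scaled up for
 * emulation/FPGA) until DONE is set and the data can be taken from
 * MCPR_NVM_READ.
 */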
4781bfad 9537static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9538 u32 cmd_flags)
9539{
f1410647 9540 int count, i, rc;
9541 u32 val;
9542
9543 /* build the command word */
9544 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9545
9546 /* need to clear DONE bit separately */
9547 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9548
9549 /* address of the NVRAM to read from */
9550 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9551 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9552
9553 /* issue a read command */
9554 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9555
9556 /* adjust timeout for emulation/FPGA */
9557 count = NVRAM_TIMEOUT_COUNT;
9558 if (CHIP_REV_IS_SLOW(bp))
9559 count *= 100;
9560
9561 /* wait for completion */
9562 *ret_val = 0;
9563 rc = -EBUSY;
9564 for (i = 0; i < count; i++) {
9565 udelay(5);
9566 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9567
9568 if (val & MCPR_NVM_COMMAND_DONE) {
9569 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9570			/* we read nvram data in cpu order
9571			 * but ethtool expects an array of bytes;
9572			 * converting to big-endian does the job */
4781bfad 9573 *ret_val = cpu_to_be32(val);
9574 rc = 0;
9575 break;
9576 }
9577 }
9578
9579 return rc;
9580}
9581
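/* Multi-dword NVRAM read.  Offset and size must be dword aligned;
 * the FIRST flag goes only on the leading dword and LAST only on the
 * trailing one, so e.g. a 12-byte read issues three dword commands
 * flagged FIRST, none, LAST.
 */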
9582static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9583 int buf_size)
9584{
9585 int rc;
9586 u32 cmd_flags;
4781bfad 9587 __be32 val;
9588
9589 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9590 DP(BNX2X_MSG_NVM,
c14423fe 9591 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9592 offset, buf_size);
9593 return -EINVAL;
9594 }
9595
9596 if (offset + buf_size > bp->common.flash_size) {
9597 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9598 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9599 offset, buf_size, bp->common.flash_size);
9600 return -EINVAL;
9601 }
9602
9603 /* request access to nvram interface */
9604 rc = bnx2x_acquire_nvram_lock(bp);
9605 if (rc)
9606 return rc;
9607
9608 /* enable access to nvram interface */
9609 bnx2x_enable_nvram_access(bp);
9610
9611 /* read the first word(s) */
9612 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9613 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9614 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9615 memcpy(ret_buf, &val, 4);
9616
9617 /* advance to the next dword */
9618 offset += sizeof(u32);
9619 ret_buf += sizeof(u32);
9620 buf_size -= sizeof(u32);
9621 cmd_flags = 0;
9622 }
9623
9624 if (rc == 0) {
9625 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9626 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9627 memcpy(ret_buf, &val, 4);
9628 }
9629
9630 /* disable access to nvram interface */
9631 bnx2x_disable_nvram_access(bp);
9632 bnx2x_release_nvram_lock(bp);
9633
9634 return rc;
9635}
9636
9637static int bnx2x_get_eeprom(struct net_device *dev,
9638 struct ethtool_eeprom *eeprom, u8 *eebuf)
9639{
9640 struct bnx2x *bp = netdev_priv(dev);
9641 int rc;
9642
9643 if (!netif_running(dev))
9644 return -EAGAIN;
9645
34f80b04 9646 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9647 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9648 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9649 eeprom->len, eeprom->len);
9650
9651 /* parameters already validated in ethtool_get_eeprom */
9652
9653 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9654
9655 return rc;
9656}
9657
9658static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9659 u32 cmd_flags)
9660{
f1410647 9661 int count, i, rc;
9662
9663 /* build the command word */
9664 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9665
9666 /* need to clear DONE bit separately */
9667 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9668
9669 /* write the data */
9670 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9671
9672 /* address of the NVRAM to write to */
9673 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9674 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9675
9676 /* issue the write command */
9677 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9678
9679 /* adjust timeout for emulation/FPGA */
9680 count = NVRAM_TIMEOUT_COUNT;
9681 if (CHIP_REV_IS_SLOW(bp))
9682 count *= 100;
9683
9684 /* wait for completion */
9685 rc = -EBUSY;
9686 for (i = 0; i < count; i++) {
9687 udelay(5);
9688 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9689 if (val & MCPR_NVM_COMMAND_DONE) {
9690 rc = 0;
9691 break;
9692 }
9693 }
9694
9695 return rc;
9696}
9697
f1410647 9698#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9699
9700static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9701 int buf_size)
9702{
9703 int rc;
9704 u32 cmd_flags;
9705 u32 align_offset;
4781bfad 9706 __be32 val;
a2fbb9ea 9707
9708 if (offset + buf_size > bp->common.flash_size) {
9709 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9710 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9711 offset, buf_size, bp->common.flash_size);
9712 return -EINVAL;
9713 }
9714
9715 /* request access to nvram interface */
9716 rc = bnx2x_acquire_nvram_lock(bp);
9717 if (rc)
9718 return rc;
9719
9720 /* enable access to nvram interface */
9721 bnx2x_enable_nvram_access(bp);
9722
9723 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9724 align_offset = (offset & ~0x03);
9725 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9726
9727 if (rc == 0) {
9728 val &= ~(0xff << BYTE_OFFSET(offset));
9729 val |= (*data_buf << BYTE_OFFSET(offset));
9730
9731		/* nvram data is returned as an array of bytes;
9732		 * convert it back to cpu order */
9733 val = be32_to_cpu(val);
9734
9735 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9736 cmd_flags);
9737 }
9738
9739 /* disable access to nvram interface */
9740 bnx2x_disable_nvram_access(bp);
9741 bnx2x_release_nvram_lock(bp);
9742
9743 return rc;
9744}
9745
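/* Multi-dword NVRAM write.  A single-byte ethtool write takes the
 * read-modify-write path above; anything else must be dword aligned.
 * FIRST/LAST are also raised on NVRAM page boundaries, so a buffer
 * that crosses a page closes the old burst and opens a new one.
 */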
9746static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9747 int buf_size)
9748{
9749 int rc;
9750 u32 cmd_flags;
9751 u32 val;
9752 u32 written_so_far;
9753
34f80b04 9754 if (buf_size == 1) /* ethtool */
a2fbb9ea 9755 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9756
9757 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9758 DP(BNX2X_MSG_NVM,
c14423fe 9759 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9760 offset, buf_size);
9761 return -EINVAL;
9762 }
9763
9764 if (offset + buf_size > bp->common.flash_size) {
9765 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9766 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9767 offset, buf_size, bp->common.flash_size);
9768 return -EINVAL;
9769 }
9770
9771 /* request access to nvram interface */
9772 rc = bnx2x_acquire_nvram_lock(bp);
9773 if (rc)
9774 return rc;
9775
9776 /* enable access to nvram interface */
9777 bnx2x_enable_nvram_access(bp);
9778
9779 written_so_far = 0;
9780 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9781 while ((written_so_far < buf_size) && (rc == 0)) {
9782 if (written_so_far == (buf_size - sizeof(u32)))
9783 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9784 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9785 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9786 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9787 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9788
9789 memcpy(&val, data_buf, 4);
9790
9791 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9792
9793 /* advance to the next dword */
9794 offset += sizeof(u32);
9795 data_buf += sizeof(u32);
9796 written_so_far += sizeof(u32);
9797 cmd_flags = 0;
9798 }
9799
9800 /* disable access to nvram interface */
9801 bnx2x_disable_nvram_access(bp);
9802 bnx2x_release_nvram_lock(bp);
9803
9804 return rc;
9805}
9806
9807static int bnx2x_set_eeprom(struct net_device *dev,
9808 struct ethtool_eeprom *eeprom, u8 *eebuf)
9809{
9810 struct bnx2x *bp = netdev_priv(dev);
9811 int port = BP_PORT(bp);
9812 int rc = 0;
a2fbb9ea 9813
9814 if (!netif_running(dev))
9815 return -EAGAIN;
9816
34f80b04 9817 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9818 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9819 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9820 eeprom->len, eeprom->len);
9821
9822 /* parameters already validated in ethtool_set_eeprom */
9823
9824 /* PHY eeprom can be accessed only by the PMF */
9825 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9826 !bp->port.pmf)
9827 return -EINVAL;
9828
9829 if (eeprom->magic == 0x50485950) {
9830 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9831 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9832
9833 bnx2x_acquire_phy_lock(bp);
9834 rc |= bnx2x_link_reset(&bp->link_params,
9835 &bp->link_vars, 0);
9836 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9837 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9838 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9839 MISC_REGISTERS_GPIO_HIGH, port);
9840 bnx2x_release_phy_lock(bp);
9841 bnx2x_link_report(bp);
9842
9843 } else if (eeprom->magic == 0x50485952) {
9844 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 9845 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 9846 bnx2x_acquire_phy_lock(bp);
9847 rc |= bnx2x_link_reset(&bp->link_params,
9848 &bp->link_vars, 1);
9849
9850 rc |= bnx2x_phy_init(&bp->link_params,
9851 &bp->link_vars);
4a37fb66 9852 bnx2x_release_phy_lock(bp);
9853 bnx2x_calc_fc_adv(bp);
9854 }
9855 } else if (eeprom->magic == 0x53985943) {
9856 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9857 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9858 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9859 u8 ext_phy_addr =
659bc5c4 9860 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9861
9862 /* DSP Remove Download Mode */
9863 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9864 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9865
9866 bnx2x_acquire_phy_lock(bp);
9867
9868 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9869
9870 /* wait 0.5 sec to allow it to run */
9871 msleep(500);
9872 bnx2x_ext_phy_hw_reset(bp, port);
9873 msleep(500);
9874 bnx2x_release_phy_lock(bp);
9875 }
9876 } else
c18487ee 9877 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9878
9879 return rc;
9880}
9881
9882static int bnx2x_get_coalesce(struct net_device *dev,
9883 struct ethtool_coalesce *coal)
9884{
9885 struct bnx2x *bp = netdev_priv(dev);
9886
9887 memset(coal, 0, sizeof(struct ethtool_coalesce));
9888
9889 coal->rx_coalesce_usecs = bp->rx_ticks;
9890 coal->tx_coalesce_usecs = bp->tx_ticks;
9891
9892 return 0;
9893}
9894
ca00392c 9895#define BNX2X_MAX_COALES_TOUT		(0xf0*12) /* Maximum coalescing timeout in us */
9896static int bnx2x_set_coalesce(struct net_device *dev,
9897 struct ethtool_coalesce *coal)
9898{
9899 struct bnx2x *bp = netdev_priv(dev);
9900
9901 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9902 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9903 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9904
9905 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9906 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9907 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9908
34f80b04 9909 if (netif_running(dev))
9910 bnx2x_update_coalesce(bp);
9911
9912 return 0;
9913}
9914
9915static void bnx2x_get_ringparam(struct net_device *dev,
9916 struct ethtool_ringparam *ering)
9917{
9918 struct bnx2x *bp = netdev_priv(dev);
9919
9920 ering->rx_max_pending = MAX_RX_AVAIL;
9921 ering->rx_mini_max_pending = 0;
9922 ering->rx_jumbo_max_pending = 0;
9923
9924 ering->rx_pending = bp->rx_ring_size;
9925 ering->rx_mini_pending = 0;
9926 ering->rx_jumbo_pending = 0;
9927
9928 ering->tx_max_pending = MAX_TX_AVAIL;
9929 ering->tx_pending = bp->tx_ring_size;
9930}
9931
9932static int bnx2x_set_ringparam(struct net_device *dev,
9933 struct ethtool_ringparam *ering)
9934{
9935 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9936 int rc = 0;
9937
9938 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9939 (ering->tx_pending > MAX_TX_AVAIL) ||
9940 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9941 return -EINVAL;
9942
9943 bp->rx_ring_size = ering->rx_pending;
9944 bp->tx_ring_size = ering->tx_pending;
9945
9946 if (netif_running(dev)) {
9947 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9948 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9949 }
9950
34f80b04 9951 return rc;
9952}
9953
9954static void bnx2x_get_pauseparam(struct net_device *dev,
9955 struct ethtool_pauseparam *epause)
9956{
9957 struct bnx2x *bp = netdev_priv(dev);
9958
9959 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9960 BNX2X_FLOW_CTRL_AUTO) &&
9961 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9962
9963 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9964 BNX2X_FLOW_CTRL_RX);
9965 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9966 BNX2X_FLOW_CTRL_TX);
9967
9968 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9969 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9970 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9971}
9972
9973static int bnx2x_set_pauseparam(struct net_device *dev,
9974 struct ethtool_pauseparam *epause)
9975{
9976 struct bnx2x *bp = netdev_priv(dev);
9977
9978 if (IS_E1HMF(bp))
9979 return 0;
9980
9981 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9982 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9983 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9984
c0700f90 9985 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9986
f1410647 9987 if (epause->rx_pause)
c0700f90 9988 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9989
f1410647 9990 if (epause->tx_pause)
c0700f90 9991 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9992
9993 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9994 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9995
c18487ee 9996 if (epause->autoneg) {
34f80b04 9997 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9998 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9999 return -EINVAL;
10000 }
a2fbb9ea 10001
c18487ee 10002 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 10003 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10004 }
a2fbb9ea 10005
10006 DP(NETIF_MSG_LINK,
10007 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10008
10009 if (netif_running(dev)) {
bb2a0f7a 10010 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10011 bnx2x_link_set(bp);
10012 }
10013
10014 return 0;
10015}
10016
10017static int bnx2x_set_flags(struct net_device *dev, u32 data)
10018{
10019 struct bnx2x *bp = netdev_priv(dev);
10020 int changed = 0;
10021 int rc = 0;
10022
10023 /* TPA requires Rx CSUM offloading */
10024 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10025 if (!(dev->features & NETIF_F_LRO)) {
10026 dev->features |= NETIF_F_LRO;
10027 bp->flags |= TPA_ENABLE_FLAG;
10028 changed = 1;
10029 }
10030
10031 } else if (dev->features & NETIF_F_LRO) {
10032 dev->features &= ~NETIF_F_LRO;
10033 bp->flags &= ~TPA_ENABLE_FLAG;
10034 changed = 1;
10035 }
10036
10037 if (changed && netif_running(dev)) {
10038 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10039 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10040 }
10041
10042 return rc;
10043}
10044
10045static u32 bnx2x_get_rx_csum(struct net_device *dev)
10046{
10047 struct bnx2x *bp = netdev_priv(dev);
10048
10049 return bp->rx_csum;
10050}
10051
10052static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10053{
10054 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10055 int rc = 0;
10056
10057 bp->rx_csum = data;
10058
10059	/* Disable TPA when Rx CSUM is disabled; otherwise all
10060 TPA'ed packets will be discarded due to wrong TCP CSUM */
10061 if (!data) {
10062 u32 flags = ethtool_op_get_flags(dev);
10063
10064 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10065 }
10066
10067 return rc;
10068}
10069
10070static int bnx2x_set_tso(struct net_device *dev, u32 data)
10071{
755735eb 10072 if (data) {
a2fbb9ea 10073 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10074 dev->features |= NETIF_F_TSO6;
10075 } else {
a2fbb9ea 10076 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10077 dev->features &= ~NETIF_F_TSO6;
10078 }
10079
10080 return 0;
10081}
10082
f3c87cdd 10083static const struct {
10084 char string[ETH_GSTRING_LEN];
10085} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10086 { "register_test (offline)" },
10087 { "memory_test (offline)" },
10088 { "loopback_test (offline)" },
10089 { "nvram_test (online)" },
10090 { "interrupt_test (online)" },
10091 { "link_test (online)" },
d3d4f495 10092 { "idle check (online)" }
10093};
10094
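/* Offline register test: for each table entry write a pattern to the
 * port's copy of the register (offset0 + port*offset1), read it back
 * under the writable-bits mask and restore the original value.  The
 * table is walked twice, once writing 0x00000000 and once 0xffffffff,
 * to catch both stuck-at-0 and stuck-at-1 bits.
 */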
10095static int bnx2x_test_registers(struct bnx2x *bp)
10096{
10097 int idx, i, rc = -ENODEV;
10098 u32 wr_val = 0;
9dabc424 10099 int port = BP_PORT(bp);
10100 static const struct {
10101 u32 offset0;
10102 u32 offset1;
10103 u32 mask;
10104 } reg_tbl[] = {
10105/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10106 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10107 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10108 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10109 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10110 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10111 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10112 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10113 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10114 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10115/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10116 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10117 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10118 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10119 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10120 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10121 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10122 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10123 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10124 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10125/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10126 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10127 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10128 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10129 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10130 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10131 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10132 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10133 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10134 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10135/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10136 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10137 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10138 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10139 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10140 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10141 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10142
10143 { 0xffffffff, 0, 0x00000000 }
10144 };
10145
10146 if (!netif_running(bp->dev))
10147 return rc;
10148
10149	/* Run the test twice:
10150	   first writing 0x00000000, then writing 0xffffffff */
10151 for (idx = 0; idx < 2; idx++) {
10152
10153 switch (idx) {
10154 case 0:
10155 wr_val = 0;
10156 break;
10157 case 1:
10158 wr_val = 0xffffffff;
10159 break;
10160 }
10161
10162 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10163 u32 offset, mask, save_val, val;
10164
10165 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10166 mask = reg_tbl[i].mask;
10167
10168 save_val = REG_RD(bp, offset);
10169
10170 REG_WR(bp, offset, wr_val);
10171 val = REG_RD(bp, offset);
10172
10173 /* Restore the original register's value */
10174 REG_WR(bp, offset, save_val);
10175
10176 /* verify that the value reads back as expected */
10177 if ((val & mask) != (wr_val & mask))
10178 goto test_reg_exit;
10179 }
10180 }
10181
10182 rc = 0;
10183
10184test_reg_exit:
10185 return rc;
10186}
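/* Editorial aside -- a minimal sketch of the write/read-back/restore
 * pattern used above, lifted out of the driver context.  reg_rd()/
 * reg_wr() are hypothetical accessors standing in for REG_RD/REG_WR;
 * only bits covered by 'mask' are assumed to be implemented R/W.
 */
#if 0	/* example only, not driver code */
static int masked_reg_selftest(u32 offset, u32 mask)
{
	static const u32 patterns[] = { 0x00000000, 0xffffffff };
	u32 save = reg_rd(offset);	/* preserve current contents */
	int i, rc = 0;

	for (i = 0; i < 2; i++) {
		reg_wr(offset, patterns[i]);
		if ((reg_rd(offset) & mask) != (patterns[i] & mask))
			rc = -ENODEV;	/* stuck or unimplemented bit */
	}
	reg_wr(offset, save);	/* always restore, even on failure */
	return rc;
}
#endif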
10187
10188static int bnx2x_test_memory(struct bnx2x *bp)
10189{
10190 int i, j, rc = -ENODEV;
10191 u32 val;
10192 static const struct {
10193 u32 offset;
10194 int size;
10195 } mem_tbl[] = {
10196 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10197 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10198 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10199 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10200 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10201 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10202 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10203
10204 { 0xffffffff, 0 }
10205 };
10206 static const struct {
10207 char *name;
10208 u32 offset;
9dabc424
YG
10209 u32 e1_mask;
10210 u32 e1h_mask;
f3c87cdd 10211 } prty_tbl[] = {
9dabc424
YG
10212 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10213 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10214 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10215 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10216 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10217 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10218
10219 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10220 };
10221
10222 if (!netif_running(bp->dev))
10223 return rc;
10224
10225 /* Go through all the memories */
10226 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10227 for (j = 0; j < mem_tbl[i].size; j++)
10228 REG_RD(bp, mem_tbl[i].offset + j*4);
10229
10230 /* Check the parity status */
10231 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10232 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10233 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10234 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10235 DP(NETIF_MSG_HW,
10236 "%s is 0x%x\n", prty_tbl[i].name, val);
10237 goto test_mem_exit;
10238 }
10239 }
10240
10241 rc = 0;
10242
10243test_mem_exit:
10244 return rc;
10245}
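/* Editorial aside -- how the parity masks above filter the status
 * reads; the values here are made up for illustration.  Bits inside
 * the per-chip mask are expected and tolerated, anything outside it
 * is a genuine parity error.
 */
#if 0	/* example only, not driver code */
	u32 sts = 0x00040040;	/* parity status read from HW */
	u32 e1_mask = 0x3ffc0;	/* bits 6..17 tolerated on E1 */

	if (sts & ~e1_mask)	/* 0x40000 survives the mask */
		rc = -ENODEV;	/* bit 18 is unexpected -> real error */
#endif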
10246
f3c87cdd
YG
10247static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10248{
10249 int cnt = 1000;
10250
10251 if (link_up)
10252 while (bnx2x_link_test(bp) && cnt--)
10253 msleep(10);
10254}
10255
10256static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10257{
10258 unsigned int pkt_size, num_pkts, i;
10259 struct sk_buff *skb;
10260 unsigned char *packet;
ca00392c
EG
10261 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10262 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
10263 u16 tx_start_idx, tx_idx;
10264 u16 rx_start_idx, rx_idx;
ca00392c 10265 u16 pkt_prod, bd_prod;
f3c87cdd 10266 struct sw_tx_bd *tx_buf;
ca00392c
EG
10267 struct eth_tx_start_bd *tx_start_bd;
10268 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10269 dma_addr_t mapping;
10270 union eth_rx_cqe *cqe;
10271 u8 cqe_fp_flags;
10272 struct sw_rx_bd *rx_buf;
10273 u16 len;
10274 int rc = -ENODEV;
10275
b5bf9068
EG
10276 /* check the loopback mode */
10277 switch (loopback_mode) {
10278 case BNX2X_PHY_LOOPBACK:
10279 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10280 return -EINVAL;
10281 break;
10282 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10283 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10284 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10285 break;
10286 default:
f3c87cdd 10287 return -EINVAL;
b5bf9068 10288 }
f3c87cdd 10289
b5bf9068
EG
10290 /* prepare the loopback packet */
10291 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10292 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10293 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10294 if (!skb) {
10295 rc = -ENOMEM;
10296 goto test_loopback_exit;
10297 }
10298 packet = skb_put(skb, pkt_size);
10299 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10300 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10301 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10302 for (i = ETH_HLEN; i < pkt_size; i++)
10303 packet[i] = (unsigned char) (i & 0xff);
10304
b5bf9068 10305 /* send the loopback packet */
f3c87cdd 10306 num_pkts = 0;
ca00392c
EG
10307 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10308 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10309
ca00392c
EG
10310 pkt_prod = fp_tx->tx_pkt_prod++;
10311 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10312 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10313 tx_buf->skb = skb;
ca00392c 10314 tx_buf->flags = 0;
f3c87cdd 10315
ca00392c
EG
10316 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10317 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10318 mapping = pci_map_single(bp->pdev, skb->data,
10319 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10320 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10321 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10322 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10323 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10324 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10325 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10326 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10327 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10328
10329 /* turn on parsing and get a BD */
10330 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10331 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10332
10333 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10334
58f4c4cf
EG
10335 wmb();
10336
ca00392c
EG
10337 fp_tx->tx_db.data.prod += 2;
10338 barrier();
10339 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10340
10341 mmiowb();
10342
10343 num_pkts++;
ca00392c 10344 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10345 bp->dev->trans_start = jiffies;
10346
10347 udelay(100);
10348
ca00392c 10349 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10350 if (tx_idx != tx_start_idx + num_pkts)
10351 goto test_loopback_exit;
10352
ca00392c 10353 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10354 if (rx_idx != rx_start_idx + num_pkts)
10355 goto test_loopback_exit;
10356
ca00392c 10357 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10358 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10359 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10360 goto test_loopback_rx_exit;
10361
10362 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10363 if (len != pkt_size)
10364 goto test_loopback_rx_exit;
10365
ca00392c 10366 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10367 skb = rx_buf->skb;
10368 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10369 for (i = ETH_HLEN; i < pkt_size; i++)
10370 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10371 goto test_loopback_rx_exit;
10372
10373 rc = 0;
10374
10375test_loopback_rx_exit:
f3c87cdd 10376
ca00392c
EG
10377 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10378 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10379 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10380 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10381
10382 /* Update producers */
ca00392c
EG
10383 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10384 fp_rx->rx_sge_prod);
f3c87cdd
YG
10385
10386test_loopback_exit:
10387 bp->link_params.loopback_mode = LOOPBACK_NONE;
10388
10389 return rc;
10390}
10391
10392static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10393{
b5bf9068 10394 int rc = 0, res;
f3c87cdd
YG
10395
10396 if (!netif_running(bp->dev))
10397 return BNX2X_LOOPBACK_FAILED;
10398
f8ef6e44 10399 bnx2x_netif_stop(bp, 1);
3910c8ae 10400 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10401
b5bf9068
EG
10402 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10403 if (res) {
10404 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10405 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10406 }
10407
b5bf9068
EG
10408 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10409 if (res) {
10410 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10411 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10412 }
10413
3910c8ae 10414 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10415 bnx2x_netif_start(bp);
10416
10417 return rc;
10418}
10419
10420#define CRC32_RESIDUAL 0xdebb20e3
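/* CRC-32 residual check: each block listed in nvram_tbl below includes
 * its own stored CRC, so recomputing the CRC over the complete block
 * (data plus stored CRC) yields this constant whenever the block is
 * intact -- the test never needs to know where the CRC field sits. */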
10421
10422static int bnx2x_test_nvram(struct bnx2x *bp)
10423{
10424 static const struct {
10425 int offset;
10426 int size;
10427 } nvram_tbl[] = {
10428 { 0, 0x14 }, /* bootstrap */
10429 { 0x14, 0xec }, /* dir */
10430 { 0x100, 0x350 }, /* manuf_info */
10431 { 0x450, 0xf0 }, /* feature_info */
10432 { 0x640, 0x64 }, /* upgrade_key_info */
10433 { 0x6a4, 0x64 },
10434 { 0x708, 0x70 }, /* manuf_key_info */
10435 { 0x778, 0x70 },
10436 { 0, 0 }
10437 };
4781bfad 10438 __be32 buf[0x350 / 4];
f3c87cdd
YG
10439 u8 *data = (u8 *)buf;
10440 int i, rc;
ab6ad5a4 10441 u32 magic, crc;
f3c87cdd
YG
10442
10443 rc = bnx2x_nvram_read(bp, 0, data, 4);
10444 if (rc) {
f5372251 10445 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10446 goto test_nvram_exit;
10447 }
10448
10449 magic = be32_to_cpu(buf[0]);
10450 if (magic != 0x669955aa) {
10451 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10452 rc = -ENODEV;
10453 goto test_nvram_exit;
10454 }
10455
10456 for (i = 0; nvram_tbl[i].size; i++) {
10457
10458 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10459 nvram_tbl[i].size);
10460 if (rc) {
10461 DP(NETIF_MSG_PROBE,
f5372251 10462 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10463 goto test_nvram_exit;
10464 }
10465
ab6ad5a4
EG
10466 crc = ether_crc_le(nvram_tbl[i].size, data);
10467 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10468 DP(NETIF_MSG_PROBE,
ab6ad5a4 10469 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10470 rc = -ENODEV;
10471 goto test_nvram_exit;
10472 }
10473 }
10474
10475test_nvram_exit:
10476 return rc;
10477}
10478
10479static int bnx2x_test_intr(struct bnx2x *bp)
10480{
10481 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10482 int i, rc;
10483
10484 if (!netif_running(bp->dev))
10485 return -ENODEV;
10486
8d9c5f34 10487 config->hdr.length = 0;
af246401
EG
10488 if (CHIP_IS_E1(bp))
10489 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10490 else
10491 config->hdr.offset = BP_FUNC(bp);
0626b899 10492 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10493 config->hdr.reserved1 = 0;
10494
e665bfda
MC
10495 bp->set_mac_pending++;
10496 smp_wmb();
f3c87cdd
YG
10497 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10498 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10499 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10500 if (rc == 0) {
f3c87cdd
YG
10501 for (i = 0; i < 10; i++) {
10502 if (!bp->set_mac_pending)
10503 break;
e665bfda 10504 smp_rmb();
f3c87cdd
YG
10505 msleep_interruptible(10);
10506 }
10507 if (i == 10)
10508 rc = -ENODEV;
10509 }
10510
10511 return rc;
10512}
10513
a2fbb9ea
ET
10514static void bnx2x_self_test(struct net_device *dev,
10515 struct ethtool_test *etest, u64 *buf)
10516{
10517 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10518
10519 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10520
f3c87cdd 10521 if (!netif_running(dev))
a2fbb9ea 10522 return;
a2fbb9ea 10523
33471629 10524 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10525 if (IS_E1HMF(bp))
10526 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10527
10528 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10529 int port = BP_PORT(bp);
10530 u32 val;
f3c87cdd
YG
10531 u8 link_up;
10532
279abdf5
EG
10533 /* save current value of input enable for TX port IF */
10534 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10535 /* disable input for TX port IF */
10536 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10537
f3c87cdd
YG
10538 link_up = bp->link_vars.link_up;
10539 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10540 bnx2x_nic_load(bp, LOAD_DIAG);
10541 /* wait until link state is restored */
10542 bnx2x_wait_for_link(bp, link_up);
10543
10544 if (bnx2x_test_registers(bp) != 0) {
10545 buf[0] = 1;
10546 etest->flags |= ETH_TEST_FL_FAILED;
10547 }
10548 if (bnx2x_test_memory(bp) != 0) {
10549 buf[1] = 1;
10550 etest->flags |= ETH_TEST_FL_FAILED;
10551 }
10552 buf[2] = bnx2x_test_loopback(bp, link_up);
10553 if (buf[2] != 0)
10554 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10555
f3c87cdd 10556 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10557
10558 /* restore input for TX port IF */
10559 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10560
f3c87cdd
YG
10561 bnx2x_nic_load(bp, LOAD_NORMAL);
10562 /* wait until link state is restored */
10563 bnx2x_wait_for_link(bp, link_up);
10564 }
10565 if (bnx2x_test_nvram(bp) != 0) {
10566 buf[3] = 1;
a2fbb9ea
ET
10567 etest->flags |= ETH_TEST_FL_FAILED;
10568 }
f3c87cdd
YG
10569 if (bnx2x_test_intr(bp) != 0) {
10570 buf[4] = 1;
10571 etest->flags |= ETH_TEST_FL_FAILED;
10572 }
10573 if (bp->port.pmf)
10574 if (bnx2x_link_test(bp) != 0) {
10575 buf[5] = 1;
10576 etest->flags |= ETH_TEST_FL_FAILED;
10577 }
f3c87cdd
YG
10578
10579#ifdef BNX2X_EXTRA_DEBUG
10580 bnx2x_panic_dump(bp);
10581#endif
a2fbb9ea
ET
10582}
10583
de832a55
EG
10584static const struct {
10585 long offset;
10586 int size;
10587 u8 string[ETH_GSTRING_LEN];
10588} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10589/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10590 { Q_STATS_OFFSET32(error_bytes_received_hi),
10591 8, "[%d]: rx_error_bytes" },
10592 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10593 8, "[%d]: rx_ucast_packets" },
10594 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10595 8, "[%d]: rx_mcast_packets" },
10596 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10597 8, "[%d]: rx_bcast_packets" },
10598 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10599 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10600 4, "[%d]: rx_phy_ip_err_discards"},
10601 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10602 4, "[%d]: rx_skb_alloc_discard" },
10603 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10604
10605/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10606 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10607 8, "[%d]: tx_packets" }
10608};
10609
bb2a0f7a
YG
10610static const struct {
10611 long offset;
10612 int size;
10613 u32 flags;
66e855f3
YG
10614#define STATS_FLAGS_PORT 1
10615#define STATS_FLAGS_FUNC 2
de832a55 10616#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10617 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10618} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10619/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10620 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10621 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10622 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10623 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10624 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10625 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10626 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10627 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10628 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10629 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10630 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10631 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10632 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10633 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10634 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10635 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10636 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10637/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10638 8, STATS_FLAGS_PORT, "rx_fragments" },
10639 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10640 8, STATS_FLAGS_PORT, "rx_jabbers" },
10641 { STATS_OFFSET32(no_buff_discard_hi),
10642 8, STATS_FLAGS_BOTH, "rx_discards" },
10643 { STATS_OFFSET32(mac_filter_discard),
10644 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10645 { STATS_OFFSET32(xxoverflow_discard),
10646 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10647 { STATS_OFFSET32(brb_drop_hi),
10648 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10649 { STATS_OFFSET32(brb_truncate_hi),
10650 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10651 { STATS_OFFSET32(pause_frames_received_hi),
10652 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10653 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10654 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10655 { STATS_OFFSET32(nig_timer_max),
10656 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10657/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10658 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10659 { STATS_OFFSET32(rx_skb_alloc_failed),
10660 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10661 { STATS_OFFSET32(hw_csum_err),
10662 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10663
10664 { STATS_OFFSET32(total_bytes_transmitted_hi),
10665 8, STATS_FLAGS_BOTH, "tx_bytes" },
10666 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10667 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10668 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10669 8, STATS_FLAGS_BOTH, "tx_packets" },
10670 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10671 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10672 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10673 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10674 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10675 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10676 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10677 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10678/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10679 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10680 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10681 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10682 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10683 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10684 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10685 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10686 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10687 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10688 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10689 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10690 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10691 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10692 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10693 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10694 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10695 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10696 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10697 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10698/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10699 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10700 { STATS_OFFSET32(pause_frames_sent_hi),
10701 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10702};
10703
de832a55
EG
10704#define IS_PORT_STAT(i) \
10705 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10706#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10707#define IS_E1HMF_MODE_STAT(bp) \
10708 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10709
15f0a394
BH
10710static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10711{
10712 struct bnx2x *bp = netdev_priv(dev);
10713 int i, num_stats;
10714
10715 switch (stringset) {
10716 case ETH_SS_STATS:
10717 if (is_multi(bp)) {
10718 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10719 if (!IS_E1HMF_MODE_STAT(bp))
10720 num_stats += BNX2X_NUM_STATS;
10721 } else {
10722 if (IS_E1HMF_MODE_STAT(bp)) {
10723 num_stats = 0;
10724 for (i = 0; i < BNX2X_NUM_STATS; i++)
10725 if (IS_FUNC_STAT(i))
10726 num_stats++;
10727 } else
10728 num_stats = BNX2X_NUM_STATS;
10729 }
10730 return num_stats;
10731
10732 case ETH_SS_TEST:
10733 return BNX2X_NUM_TESTS;
10734
10735 default:
10736 return -EINVAL;
10737 }
10738}
10739
a2fbb9ea
ET
10740static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10741{
bb2a0f7a 10742 struct bnx2x *bp = netdev_priv(dev);
de832a55 10743 int i, j, k;
bb2a0f7a 10744
a2fbb9ea
ET
10745 switch (stringset) {
10746 case ETH_SS_STATS:
de832a55
EG
10747 if (is_multi(bp)) {
10748 k = 0;
ca00392c 10749 for_each_rx_queue(bp, i) {
de832a55
EG
10750 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10751 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10752 bnx2x_q_stats_arr[j].string, i);
10753 k += BNX2X_NUM_Q_STATS;
10754 }
10755 if (IS_E1HMF_MODE_STAT(bp))
10756 break;
10757 for (j = 0; j < BNX2X_NUM_STATS; j++)
10758 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10759 bnx2x_stats_arr[j].string);
10760 } else {
10761 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10762 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10763 continue;
10764 strcpy(buf + j*ETH_GSTRING_LEN,
10765 bnx2x_stats_arr[i].string);
10766 j++;
10767 }
bb2a0f7a 10768 }
a2fbb9ea
ET
10769 break;
10770
10771 case ETH_SS_TEST:
10772 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10773 break;
10774 }
10775}
10776
a2fbb9ea
ET
10777static void bnx2x_get_ethtool_stats(struct net_device *dev,
10778 struct ethtool_stats *stats, u64 *buf)
10779{
10780 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10781 u32 *hw_stats, *offset;
10782 int i, j, k;
bb2a0f7a 10783
de832a55
EG
10784 if (is_multi(bp)) {
10785 k = 0;
ca00392c 10786 for_each_rx_queue(bp, i) {
de832a55
EG
10787 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10788 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10789 if (bnx2x_q_stats_arr[j].size == 0) {
10790 /* skip this counter */
10791 buf[k + j] = 0;
10792 continue;
10793 }
10794 offset = (hw_stats +
10795 bnx2x_q_stats_arr[j].offset);
10796 if (bnx2x_q_stats_arr[j].size == 4) {
10797 /* 4-byte counter */
10798 buf[k + j] = (u64) *offset;
10799 continue;
10800 }
10801 /* 8-byte counter */
10802 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10803 }
10804 k += BNX2X_NUM_Q_STATS;
10805 }
10806 if (IS_E1HMF_MODE_STAT(bp))
10807 return;
10808 hw_stats = (u32 *)&bp->eth_stats;
10809 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10810 if (bnx2x_stats_arr[j].size == 0) {
10811 /* skip this counter */
10812 buf[k + j] = 0;
10813 continue;
10814 }
10815 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10816 if (bnx2x_stats_arr[j].size == 4) {
10817 /* 4-byte counter */
10818 buf[k + j] = (u64) *offset;
10819 continue;
10820 }
10821 /* 8-byte counter */
10822 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10823 }
de832a55
EG
10824 } else {
10825 hw_stats = (u32 *)&bp->eth_stats;
10826 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10827 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10828 continue;
10829 if (bnx2x_stats_arr[i].size == 0) {
10830 /* skip this counter */
10831 buf[j] = 0;
10832 j++;
10833 continue;
10834 }
10835 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10836 if (bnx2x_stats_arr[i].size == 4) {
10837 /* 4-byte counter */
10838 buf[j] = (u64) *offset;
10839 j++;
10840 continue;
10841 }
10842 /* 8-byte counter */
10843 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10844 j++;
a2fbb9ea 10845 }
a2fbb9ea
ET
10846 }
10847}
10848
10849static int bnx2x_phys_id(struct net_device *dev, u32 data)
10850{
10851 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10852 int port = BP_PORT(bp);
a2fbb9ea
ET
10853 int i;
10854
34f80b04
EG
10855 if (!netif_running(dev))
10856 return 0;
10857
10858 if (!bp->port.pmf)
10859 return 0;
10860
a2fbb9ea
ET
10861 if (data == 0)
10862 data = 2;
10863
10864 for (i = 0; i < (data * 2); i++) {
c18487ee 10865 if ((i % 2) == 0)
34f80b04 10866 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10867 bp->link_params.hw_led_mode,
10868 bp->link_params.chip_id);
10869 else
34f80b04 10870 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10871 bp->link_params.hw_led_mode,
10872 bp->link_params.chip_id);
10873
a2fbb9ea
ET
10874 msleep_interruptible(500);
10875 if (signal_pending(current))
10876 break;
10877 }
10878
c18487ee 10879 if (bp->link_vars.link_up)
34f80b04 10880 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10881 bp->link_vars.line_speed,
10882 bp->link_params.hw_led_mode,
10883 bp->link_params.chip_id);
a2fbb9ea
ET
10884
10885 return 0;
10886}
10887
0fc0b732 10888static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10889 .get_settings = bnx2x_get_settings,
10890 .set_settings = bnx2x_set_settings,
10891 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10892 .get_regs_len = bnx2x_get_regs_len,
10893 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10894 .get_wol = bnx2x_get_wol,
10895 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10896 .get_msglevel = bnx2x_get_msglevel,
10897 .set_msglevel = bnx2x_set_msglevel,
10898 .nway_reset = bnx2x_nway_reset,
01e53298 10899 .get_link = bnx2x_get_link,
7a9b2557
VZ
10900 .get_eeprom_len = bnx2x_get_eeprom_len,
10901 .get_eeprom = bnx2x_get_eeprom,
10902 .set_eeprom = bnx2x_set_eeprom,
10903 .get_coalesce = bnx2x_get_coalesce,
10904 .set_coalesce = bnx2x_set_coalesce,
10905 .get_ringparam = bnx2x_get_ringparam,
10906 .set_ringparam = bnx2x_set_ringparam,
10907 .get_pauseparam = bnx2x_get_pauseparam,
10908 .set_pauseparam = bnx2x_set_pauseparam,
10909 .get_rx_csum = bnx2x_get_rx_csum,
10910 .set_rx_csum = bnx2x_set_rx_csum,
10911 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10912 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10913 .set_flags = bnx2x_set_flags,
10914 .get_flags = ethtool_op_get_flags,
10915 .get_sg = ethtool_op_get_sg,
10916 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10917 .get_tso = ethtool_op_get_tso,
10918 .set_tso = bnx2x_set_tso,
7a9b2557 10919 .self_test = bnx2x_self_test,
15f0a394 10920 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10921 .get_strings = bnx2x_get_strings,
a2fbb9ea 10922 .phys_id = bnx2x_phys_id,
bb2a0f7a 10923 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10924};
10925
10926/* end of ethtool_ops */
10927
10928/****************************************************************************
10929* General service functions
10930****************************************************************************/
10931
10932static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10933{
10934 u16 pmcsr;
10935
10936 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10937
10938 switch (state) {
10939 case PCI_D0:
34f80b04 10940 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10941 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10942 PCI_PM_CTRL_PME_STATUS));
10943
10944 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10945 /* delay required during transition out of D3hot */
a2fbb9ea 10946 msleep(20);
34f80b04 10947 break;
a2fbb9ea 10948
34f80b04
EG
10949 case PCI_D3hot:
10950 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10951 pmcsr |= 3;
a2fbb9ea 10952
34f80b04
EG
10953 if (bp->wol)
10954 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10955
34f80b04
EG
10956 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10957 pmcsr);
a2fbb9ea 10958
34f80b04
EG
10959 /* No more memory access after this point until
10960 * device is brought back to D0.
10961 */
10962 break;
10963
10964 default:
10965 return -EINVAL;
10966 }
10967 return 0;
a2fbb9ea
ET
10968}
10969
237907c1
EG
10970static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10971{
10972 u16 rx_cons_sb;
10973
10974 /* Tell compiler that status block fields can change */
10975 barrier();
10976 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10977 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10978 rx_cons_sb++;
10979 return (fp->rx_comp_cons != rx_cons_sb);
10980}
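/* Note on the check above: the last entry of each RCQ page is a
 * "next page" element rather than a real completion, so a consumer
 * index landing on MAX_RCQ_DESC_CNT is bumped past it before being
 * compared with rx_comp_cons. */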
10981
34f80b04
EG
10982/*
10983 * net_device service functions
10984 */
10985
a2fbb9ea
ET
10986static int bnx2x_poll(struct napi_struct *napi, int budget)
10987{
10988 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10989 napi);
10990 struct bnx2x *bp = fp->bp;
10991 int work_done = 0;
10992
10993#ifdef BNX2X_STOP_ON_ERROR
10994 if (unlikely(bp->panic))
34f80b04 10995 goto poll_panic;
a2fbb9ea
ET
10996#endif
10997
a2fbb9ea
ET
10998 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10999 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11000
11001 bnx2x_update_fpsb_idx(fp);
11002
8534f32c 11003 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 11004 work_done = bnx2x_rx_int(fp, budget);
356e2385 11005
8534f32c
EG
11006 /* must not complete if we consumed full budget */
11007 if (work_done >= budget)
11008 goto poll_again;
11009 }
a2fbb9ea 11010
ca00392c 11011 /* bnx2x_has_rx_work() reads the status block, so we must make sure
8534f32c 11012 * the status block indices have actually been read
ca00392c 11013 * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work);
8534f32c 11014 * otherwise we may write a "newer" status block value to the IGU:
ca00392c 11015 * if a DMA lands right after bnx2x_has_rx_work and
8534f32c
EG
11016 * there is no rmb, the memory read (bnx2x_update_fpsb_idx)
11017 * may be postponed to right before bnx2x_ack_sb. In that case
11018 * there will be no further interrupt until the status block is
11019 * updated again, even though there is still unhandled work.
11020 */
11021 rmb();
a2fbb9ea 11022
ca00392c 11023 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 11024#ifdef BNX2X_STOP_ON_ERROR
34f80b04 11025poll_panic:
a2fbb9ea 11026#endif
288379f0 11027 napi_complete(napi);
a2fbb9ea 11028
0626b899 11029 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 11030 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 11031 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
11032 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11033 }
356e2385 11034
8534f32c 11035poll_again:
a2fbb9ea
ET
11036 return work_done;
11037}
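/* Editorial aside -- the ordering hazard the rmb() in bnx2x_poll()
 * closes, reduced to two steps (simplified, not driver code):
 *
 *   CPU (this poll loop)             HW (DMA into status block)
 *   idx = sb->index;   (A)           sb->index = new;
 *   rmb();
 *   if (work_pending)  (B)
 *           ack(idx);
 *
 * Without the rmb(), the load at (A) may effectively complete after
 * the check at (B); we would then ack an index newer than the work we
 * actually handled and never see another interrupt. */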
11038
755735eb
EG
11039
11040/* we split the first BD into headers and data BDs
33471629 11041 * to ease the pain of our fellow microcode engineers
755735eb
EG
11042 * we use one mapping for both BDs
11043 * So far this has only been observed to happen
11044 * in Other Operating Systems(TM)
11045 */
11046static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11047 struct bnx2x_fastpath *fp,
ca00392c
EG
11048 struct sw_tx_bd *tx_buf,
11049 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
11050 u16 bd_prod, int nbd)
11051{
ca00392c 11052 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
11053 struct eth_tx_bd *d_tx_bd;
11054 dma_addr_t mapping;
11055 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11056
11057 /* first fix first BD */
11058 h_tx_bd->nbd = cpu_to_le16(nbd);
11059 h_tx_bd->nbytes = cpu_to_le16(hlen);
11060
11061 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11062 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11063 h_tx_bd->addr_lo, h_tx_bd->nbd);
11064
11065 /* now get a new data BD
11066 * (after the pbd) and fill it */
11067 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11068 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11069
11070 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11071 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11072
11073 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11074 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11075 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11076
11077 /* this marks the BD as one that has no individual mapping */
11078 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11079
755735eb
EG
11080 DP(NETIF_MSG_TX_QUEUED,
11081 "TSO split data size is %d (%x:%x)\n",
11082 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11083
ca00392c
EG
11084 /* update tx_bd */
11085 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11086
11087 return bd_prod;
11088}
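/* Editorial aside -- the split arithmetic above on a made-up packet:
 * a 242-byte linear part with hlen = 66 bytes of headers becomes
 *
 *   start_bd { addr = A,      nbytes = 66  }   <- headers
 *   data_bd  { addr = A + 66, nbytes = 176 }   <- payload
 *
 * Both BDs point into the same pci_map_single() mapping, which is why
 * the buffer is flagged BNX2X_TSO_SPLIT_BD: only one unmap happens at
 * Tx completion. */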
11089
11090static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11091{
11092 if (fix > 0)
11093 csum = (u16) ~csum_fold(csum_sub(csum,
11094 csum_partial(t_header - fix, fix, 0)));
11095
11096 else if (fix < 0)
11097 csum = (u16) ~csum_fold(csum_add(csum,
11098 csum_partial(t_header, -fix, 0)));
11099
11100 return swab16(csum);
11101}
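/* Editorial aside -- what 'fix' means above: it is the (signed) gap
 * between where the stack started its partial checksum and the start
 * of the transport header.  fix > 0 means 'fix' extra bytes were
 * summed in and their contribution is removed with csum_sub();
 * fix < 0 means bytes were missed and are added with csum_add().
 * swab16() then puts the folded result in the byte order the parse BD
 * consumers expect. */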
11102
11103static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11104{
11105 u32 rc;
11106
11107 if (skb->ip_summed != CHECKSUM_PARTIAL)
11108 rc = XMIT_PLAIN;
11109
11110 else {
4781bfad 11111 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11112 rc = XMIT_CSUM_V6;
11113 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11114 rc |= XMIT_CSUM_TCP;
11115
11116 } else {
11117 rc = XMIT_CSUM_V4;
11118 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11119 rc |= XMIT_CSUM_TCP;
11120 }
11121 }
11122
11123 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11124 rc |= XMIT_GSO_V4;
11125
11126 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11127 rc |= XMIT_GSO_V6;
11128
11129 return rc;
11130}
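/* Editorial aside -- flag composition for a typical TSO'd IPv4/TCP
 * skb (CHECKSUM_PARTIAL set); setup_lso() is a made-up consumer:
 */
#if 0	/* example only, not driver code */
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	/* xmit_type == (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4) */
	if (xmit_type & XMIT_GSO)	/* covers both GSO_V4 and GSO_V6 */
		setup_lso(pbd, skb);
#endif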
11131
632da4d6 11132#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11133/* check if the packet requires linearization (i.e. it is too fragmented);
11134 no need to check fragmentation if the page size is > 8K, since that
11135 cannot violate the FW restrictions */
755735eb
EG
11136static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11137 u32 xmit_type)
11138{
11139 int to_copy = 0;
11140 int hlen = 0;
11141 int first_bd_sz = 0;
11142
11143 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11144 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11145
11146 if (xmit_type & XMIT_GSO) {
11147 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11148 /* Check if LSO packet needs to be copied:
11149 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11150 int wnd_size = MAX_FETCH_BD - 3;
33471629 11151 /* Number of windows to check */
755735eb
EG
11152 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11153 int wnd_idx = 0;
11154 int frag_idx = 0;
11155 u32 wnd_sum = 0;
11156
11157 /* Headers length */
11158 hlen = (int)(skb_transport_header(skb) - skb->data) +
11159 tcp_hdrlen(skb);
11160
11161 /* Amount of data (w/o headers) on linear part of SKB*/
11162 first_bd_sz = skb_headlen(skb) - hlen;
11163
11164 wnd_sum = first_bd_sz;
11165
11166 /* Calculate the first sum - it's special */
11167 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11168 wnd_sum +=
11169 skb_shinfo(skb)->frags[frag_idx].size;
11170
11171 /* If there was data on linear skb data - check it */
11172 if (first_bd_sz > 0) {
11173 if (unlikely(wnd_sum < lso_mss)) {
11174 to_copy = 1;
11175 goto exit_lbl;
11176 }
11177
11178 wnd_sum -= first_bd_sz;
11179 }
11180
11181 /* Others are easier: run through the frag list and
11182 check all windows */
11183 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11184 wnd_sum +=
11185 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11186
11187 if (unlikely(wnd_sum < lso_mss)) {
11188 to_copy = 1;
11189 break;
11190 }
11191 wnd_sum -=
11192 skb_shinfo(skb)->frags[wnd_idx].size;
11193 }
755735eb
EG
11194 } else {
11195 /* a non-LSO packet that is too fragmented
11196 must always be linearized */
11197 to_copy = 1;
11198 }
11199 }
11200
11201exit_lbl:
11202 if (unlikely(to_copy))
11203 DP(NETIF_MSG_TX_QUEUED,
11204 "Linearization IS REQUIRED for %s packet. "
11205 "num_frags %d hlen %d first_bd_sz %d\n",
11206 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11207 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11208
11209 return to_copy;
11210}
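/* Editorial aside -- the sliding-window rule above on a toy layout,
 * assuming a window of 10 BDs (wnd_size = MAX_FETCH_BD - 3): with
 * lso_mss = 1500 and twelve 200-byte frags, any 10 consecutive frags
 * carry 2000 >= 1500 bytes, so each MSS-worth of payload fits in one
 * BD window and no copy is needed; shrink the frags to 100 bytes and
 * a window holds only 1000 < 1500 -> linearize. */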
632da4d6 11211#endif
755735eb
EG
11212
11213/* called with netif_tx_lock
a2fbb9ea 11214 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11215 * netif_wake_queue()
a2fbb9ea 11216 */
61357325 11217static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11218{
11219 struct bnx2x *bp = netdev_priv(dev);
ca00392c 11220 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 11221 struct netdev_queue *txq;
a2fbb9ea 11222 struct sw_tx_bd *tx_buf;
ca00392c
EG
11223 struct eth_tx_start_bd *tx_start_bd;
11224 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11225 struct eth_tx_parse_bd *pbd = NULL;
11226 u16 pkt_prod, bd_prod;
755735eb 11227 int nbd, fp_index;
a2fbb9ea 11228 dma_addr_t mapping;
755735eb 11229 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11230 int i;
11231 u8 hlen = 0;
ca00392c 11232 __le16 pkt_size = 0;
a2fbb9ea
ET
11233
11234#ifdef BNX2X_STOP_ON_ERROR
11235 if (unlikely(bp->panic))
11236 return NETDEV_TX_BUSY;
11237#endif
11238
555f6c78
EG
11239 fp_index = skb_get_queue_mapping(skb);
11240 txq = netdev_get_tx_queue(dev, fp_index);
11241
ca00392c
EG
11242 fp = &bp->fp[fp_index + bp->num_rx_queues];
11243 fp_stat = &bp->fp[fp_index];
755735eb 11244
231fd58a 11245 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 11246 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 11247 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11248 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11249 return NETDEV_TX_BUSY;
11250 }
11251
755735eb
EG
11252 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11253 " gso type %x xmit_type %x\n",
11254 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11255 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11256
632da4d6 11257#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11258 /* First, check if we need to linearize the skb (due to FW
11259 restrictions). No need to check fragmentation if page size > 8K
11260 (there will be no violation to FW restrictions) */
755735eb
EG
11261 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11262 /* Statistics of linearization */
11263 bp->lin_cnt++;
11264 if (skb_linearize(skb) != 0) {
11265 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11266 "silently dropping this SKB\n");
11267 dev_kfree_skb_any(skb);
da5a662a 11268 return NETDEV_TX_OK;
755735eb
EG
11269 }
11270 }
632da4d6 11271#endif
755735eb 11272
a2fbb9ea 11273 /*
755735eb 11274 Please read carefully. First we use one BD which we mark as start,
ca00392c 11275 then we have a parsing info BD (used for TSO or xsum),
755735eb 11276 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11277 (don't forget to mark the last one as last,
11278 and to unmap only AFTER you write to the BD ...)
755735eb 11279 And above all, all pbd sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11280 */
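	/* Illustrative layout of the chain built below (example only):
	 *
	 *   start_bd  -> flags, vlan, mapping of the linear data
	 *   parse_bd  -> checksum/TSO parameters (the "pbd")
	 *   data_bd 0 -> frag 0 mapping
	 *   ...
	 *   data_bd N -> frag N mapping
	 *
	 * nbd counts all of the above (plus one when the packet's BDs
	 * cross a ring page), and the doorbell tells FW to fetch them.
	 */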
11281
11282 pkt_prod = fp->tx_pkt_prod++;
755735eb 11283 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11284
755735eb 11285 /* get a tx_buf and first BD */
a2fbb9ea 11286 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11287 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11288
ca00392c
EG
11289 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11290 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11291 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11292 /* header nbd */
ca00392c 11293 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11294
755735eb
EG
11295 /* remember the first BD of the packet */
11296 tx_buf->first_bd = fp->tx_bd_prod;
11297 tx_buf->skb = skb;
ca00392c 11298 tx_buf->flags = 0;
a2fbb9ea
ET
11299
11300 DP(NETIF_MSG_TX_QUEUED,
11301 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11302 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11303
0c6671b0
EG
11304#ifdef BCM_VLAN
11305 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11306 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11307 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11308 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11309 } else
0c6671b0 11310#endif
ca00392c 11311 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11312
ca00392c
EG
11313 /* turn on parsing and get a BD */
11314 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11315 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11316
ca00392c 11317 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11318
11319 if (xmit_type & XMIT_CSUM) {
ca00392c 11320 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11321
11322 /* for now NS flag is not used in Linux */
4781bfad
EG
11323 pbd->global_data =
11324 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11325 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11326
755735eb
EG
11327 pbd->ip_hlen = (skb_transport_header(skb) -
11328 skb_network_header(skb)) / 2;
11329
11330 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11331
755735eb 11332 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11333 hlen = hlen*2;
a2fbb9ea 11334
ca00392c 11335 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11336
11337 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11338 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11339 ETH_TX_BD_FLAGS_IP_CSUM;
11340 else
ca00392c
EG
11341 tx_start_bd->bd_flags.as_bitfield |=
11342 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11343
11344 if (xmit_type & XMIT_CSUM_TCP) {
11345 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11346
11347 } else {
11348 s8 fix = SKB_CS_OFF(skb); /* signed! */
11349
ca00392c 11350 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11351
755735eb 11352 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11353 "hlen %d fix %d csum before fix %x\n",
11354 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11355
11356 /* HW bug: fixup the CSUM */
11357 pbd->tcp_pseudo_csum =
11358 bnx2x_csum_fix(skb_transport_header(skb),
11359 SKB_CS(skb), fix);
11360
11361 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11362 pbd->tcp_pseudo_csum);
11363 }
a2fbb9ea
ET
11364 }
11365
11366 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11367 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11368
ca00392c
EG
11369 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11370 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11371 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11372 tx_start_bd->nbd = cpu_to_le16(nbd);
11373 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11374 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11375
11376 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11377 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11378 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11379 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11380 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11381
755735eb 11382 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11383
11384 DP(NETIF_MSG_TX_QUEUED,
11385 "TSO packet len %d hlen %d total len %d tso size %d\n",
11386 skb->len, hlen, skb_headlen(skb),
11387 skb_shinfo(skb)->gso_size);
11388
ca00392c 11389 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11390
755735eb 11391 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11392 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11393 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11394
11395 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11396 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11397 pbd->tcp_flags = pbd_tcp_flags(skb);
11398
11399 if (xmit_type & XMIT_GSO_V4) {
11400 pbd->ip_id = swab16(ip_hdr(skb)->id);
11401 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11402 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11403 ip_hdr(skb)->daddr,
11404 0, IPPROTO_TCP, 0));
755735eb
EG
11405
11406 } else
11407 pbd->tcp_pseudo_csum =
11408 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11409 &ipv6_hdr(skb)->daddr,
11410 0, IPPROTO_TCP, 0));
11411
a2fbb9ea
ET
11412 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11413 }
ca00392c 11414 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11415
755735eb
EG
11416 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11417 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11418
755735eb 11419 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11420 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11421 if (total_pkt_bd == NULL)
11422 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11423
755735eb
EG
11424 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11425 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11426
ca00392c
EG
11427 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11428 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11429 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11430 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11431
755735eb 11432 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11433 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11434 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11435 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11436 }
11437
ca00392c 11438 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11439
a2fbb9ea
ET
11440 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11441
755735eb 11442 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11443 * if the packet contains or ends with it
11444 */
11445 if (TX_BD_POFF(bd_prod) < nbd)
11446 nbd++;
11447
ca00392c
EG
11448 if (total_pkt_bd != NULL)
11449 total_pkt_bd->total_pkt_bytes = pkt_size;
11450
a2fbb9ea
ET
11451 if (pbd)
11452 DP(NETIF_MSG_TX_QUEUED,
11453 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11454 " tcp_flags %x xsum %x seq %u hlen %u\n",
11455 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11456 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11457 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11458
755735eb 11459 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11460
58f4c4cf
EG
11461 /*
11462 * Make sure that the BD data is updated before updating the producer
11463 * since FW might read the BD right after the producer is updated.
11464 * This is only applicable for weak-ordered memory model archs such
11465 * as IA-64. The following barrier is also mandatory since FW
11466 * assumes packets must have BDs.
11467 */
11468 wmb();
11469
ca00392c
EG
11470 fp->tx_db.data.prod += nbd;
11471 barrier();
11472 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11473
11474 mmiowb();
11475
755735eb 11476 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11477
11478 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11479 netif_tx_stop_queue(txq);
58f4c4cf
EG
11480 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11481 if we put Tx into XOFF state. */
11482 smp_mb();
ca00392c 11483 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11484 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11485 netif_tx_wake_queue(txq);
a2fbb9ea 11486 }
ca00392c 11487 fp_stat->tx_pkt++;
a2fbb9ea
ET
11488
11489 return NETDEV_TX_OK;
11490}
11491
bb2a0f7a 11492/* called with rtnl_lock */
a2fbb9ea
ET
11493static int bnx2x_open(struct net_device *dev)
11494{
11495 struct bnx2x *bp = netdev_priv(dev);
11496
6eccabb3
EG
11497 netif_carrier_off(dev);
11498
a2fbb9ea
ET
11499 bnx2x_set_power_state(bp, PCI_D0);
11500
bb2a0f7a 11501 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11502}
11503
bb2a0f7a 11504/* called with rtnl_lock */
a2fbb9ea
ET
11505static int bnx2x_close(struct net_device *dev)
11506{
a2fbb9ea
ET
11507 struct bnx2x *bp = netdev_priv(dev);
11508
11509 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11510 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11511 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11512 if (!CHIP_REV_IS_SLOW(bp))
11513 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11514
11515 return 0;
11516}
11517
f5372251 11518/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11519static void bnx2x_set_rx_mode(struct net_device *dev)
11520{
11521 struct bnx2x *bp = netdev_priv(dev);
11522 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11523 int port = BP_PORT(bp);
11524
11525 if (bp->state != BNX2X_STATE_OPEN) {
11526 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11527 return;
11528 }
11529
11530 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11531
11532 if (dev->flags & IFF_PROMISC)
11533 rx_mode = BNX2X_RX_MODE_PROMISC;
11534
11535 else if ((dev->flags & IFF_ALLMULTI) ||
11536 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11537 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11538
11539 else { /* some multicasts */
11540 if (CHIP_IS_E1(bp)) {
11541 int i, old, offset;
11542 struct dev_mc_list *mclist;
11543 struct mac_configuration_cmd *config =
11544 bnx2x_sp(bp, mcast_config);
11545
11546 for (i = 0, mclist = dev->mc_list;
11547 mclist && (i < dev->mc_count);
11548 i++, mclist = mclist->next) {
11549
11550 config->config_table[i].
11551 cam_entry.msb_mac_addr =
11552 swab16(*(u16 *)&mclist->dmi_addr[0]);
11553 config->config_table[i].
11554 cam_entry.middle_mac_addr =
11555 swab16(*(u16 *)&mclist->dmi_addr[2]);
11556 config->config_table[i].
11557 cam_entry.lsb_mac_addr =
11558 swab16(*(u16 *)&mclist->dmi_addr[4]);
11559 config->config_table[i].cam_entry.flags =
11560 cpu_to_le16(port);
11561 config->config_table[i].
11562 target_table_entry.flags = 0;
ca00392c
EG
11563 config->config_table[i].target_table_entry.
11564 clients_bit_vector =
11565 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11566 config->config_table[i].
11567 target_table_entry.vlan_id = 0;
11568
11569 DP(NETIF_MSG_IFUP,
11570 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11571 config->config_table[i].
11572 cam_entry.msb_mac_addr,
11573 config->config_table[i].
11574 cam_entry.middle_mac_addr,
11575 config->config_table[i].
11576 cam_entry.lsb_mac_addr);
11577 }
8d9c5f34 11578 old = config->hdr.length;
34f80b04
EG
11579 if (old > i) {
11580 for (; i < old; i++) {
11581 if (CAM_IS_INVALID(config->
11582 config_table[i])) {
af246401 11583 /* already invalidated */
34f80b04
EG
11584 break;
11585 }
11586 /* invalidate */
11587 CAM_INVALIDATE(config->
11588 config_table[i]);
11589 }
11590 }
11591
11592 if (CHIP_REV_IS_SLOW(bp))
11593 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11594 else
11595 offset = BNX2X_MAX_MULTICAST*(1 + port);
11596
8d9c5f34 11597 config->hdr.length = i;
34f80b04 11598 config->hdr.offset = offset;
8d9c5f34 11599 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11600 config->hdr.reserved1 = 0;
11601
e665bfda
MC
11602 bp->set_mac_pending++;
11603 smp_wmb();
11604
34f80b04
EG
11605 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11606 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11607 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11608 0);
11609 } else { /* E1H */
11610 /* Accept one or more multicasts */
11611 struct dev_mc_list *mclist;
11612 u32 mc_filter[MC_HASH_SIZE];
11613 u32 crc, bit, regidx;
11614 int i;
11615
11616 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11617
11618 for (i = 0, mclist = dev->mc_list;
11619 mclist && (i < dev->mc_count);
11620 i++, mclist = mclist->next) {
11621
7c510e4b
JB
11622 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11623 mclist->dmi_addr);
34f80b04
EG
11624
11625 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11626 bit = (crc >> 24) & 0xff;
11627 regidx = bit >> 5;
11628 bit &= 0x1f;
11629 mc_filter[regidx] |= (1 << bit);
11630 }
11631
11632 for (i = 0; i < MC_HASH_SIZE; i++)
11633 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11634 mc_filter[i]);
11635 }
11636 }
11637
11638 bp->rx_mode = rx_mode;
11639 bnx2x_set_storm_rx_mode(bp);
11640}
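/* Editorial aside -- where a multicast MAC lands in the E1H hash
 * filter above: the 8 MC_HASH registers form one 256-bit table,
 * indexed by the top byte of the crc32c of the address.
 */
#if 0	/* example only, not driver code */
	u32 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* top byte -> 0..255 */
	u32 regidx = bit >> 5;		/* which 32-bit register (0..7) */

	mc_filter[regidx] |= 1 << (bit & 0x1f);	/* bit inside that reg */
#endif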
11641
11642/* called with rtnl_lock */
a2fbb9ea
ET
11643static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11644{
11645 struct sockaddr *addr = p;
11646 struct bnx2x *bp = netdev_priv(dev);
11647
34f80b04 11648 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11649 return -EINVAL;
11650
11651 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11652 if (netif_running(dev)) {
11653 if (CHIP_IS_E1(bp))
e665bfda 11654 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11655 else
e665bfda 11656 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11657 }
a2fbb9ea
ET
11658
11659 return 0;
11660}
11661
c18487ee 11662/* called with rtnl_lock */
01cd4528
EG
11663static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11664 int devad, u16 addr)
a2fbb9ea 11665{
01cd4528
EG
11666 struct bnx2x *bp = netdev_priv(netdev);
11667 u16 value;
11668 int rc;
11669 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11670
01cd4528
EG
11671 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11672 prtad, devad, addr);
a2fbb9ea 11673
01cd4528
EG
11674 if (prtad != bp->mdio.prtad) {
11675 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11676 prtad, bp->mdio.prtad);
11677 return -EINVAL;
11678 }
11679
11680 /* The HW expects different devad if CL22 is used */
11681 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11682
01cd4528
EG
11683 bnx2x_acquire_phy_lock(bp);
11684 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11685 devad, addr, &value);
11686 bnx2x_release_phy_lock(bp);
11687 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11688
01cd4528
EG
11689 if (!rc)
11690 rc = value;
11691 return rc;
11692}
a2fbb9ea 11693
01cd4528
EG
11694/* called with rtnl_lock */
11695static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11696 u16 addr, u16 value)
11697{
11698 struct bnx2x *bp = netdev_priv(netdev);
11699 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11700 int rc;
11701
11702 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11703 " value 0x%x\n", prtad, devad, addr, value);
11704
11705 if (prtad != bp->mdio.prtad) {
11706 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11707 prtad, bp->mdio.prtad);
11708 return -EINVAL;
a2fbb9ea
ET
11709 }
11710
01cd4528
EG
11711 /* The HW expects different devad if CL22 is used */
11712 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11713
01cd4528
EG
11714 bnx2x_acquire_phy_lock(bp);
11715 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11716 devad, addr, value);
11717 bnx2x_release_phy_lock(bp);
11718 return rc;
11719}
c18487ee 11720
01cd4528
EG
11721/* called with rtnl_lock */
11722static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11723{
11724 struct bnx2x *bp = netdev_priv(dev);
11725 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11726
01cd4528
EG
11727 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11728 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11729
01cd4528
EG
11730 if (!netif_running(dev))
11731 return -EAGAIN;
11732
11733 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11734}
11735
34f80b04 11736/* called with rtnl_lock */
a2fbb9ea
ET
11737static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11738{
11739 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11740 int rc = 0;
a2fbb9ea
ET
11741
11742 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11743 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11744 return -EINVAL;
11745
11746 /* This does not race with packet allocation
c14423fe 11747 * because the actual alloc size is
a2fbb9ea
ET
11748 * only updated as part of load
11749 */
11750 dev->mtu = new_mtu;
11751
11752 if (netif_running(dev)) {
34f80b04
EG
11753 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11754 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11755 }
34f80b04
EG
11756
11757 return rc;
a2fbb9ea
ET
11758}
11759
11760static void bnx2x_tx_timeout(struct net_device *dev)
11761{
11762 struct bnx2x *bp = netdev_priv(dev);
11763
11764#ifdef BNX2X_STOP_ON_ERROR
11765 if (!bp->panic)
11766 bnx2x_panic();
11767#endif
11768 /* This allows the netif to be shutdown gracefully before resetting */
11769 schedule_work(&bp->reset_task);
11770}
11771
11772#ifdef BCM_VLAN
34f80b04 11773/* called with rtnl_lock */
a2fbb9ea
ET
11774static void bnx2x_vlan_rx_register(struct net_device *dev,
11775 struct vlan_group *vlgrp)
11776{
11777 struct bnx2x *bp = netdev_priv(dev);
11778
11779 bp->vlgrp = vlgrp;
0c6671b0
EG
11780
11781 /* Set flags according to the required capabilities */
11782 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11783
11784 if (dev->features & NETIF_F_HW_VLAN_TX)
11785 bp->flags |= HW_VLAN_TX_FLAG;
11786
11787 if (dev->features & NETIF_F_HW_VLAN_RX)
11788 bp->flags |= HW_VLAN_RX_FLAG;
11789
a2fbb9ea 11790 if (netif_running(dev))
49d66772 11791 bnx2x_set_client_config(bp);
a2fbb9ea 11792}
34f80b04 11793
a2fbb9ea
ET
11794#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
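
/* One-time PCI and netdev setup: enables the device, maps BAR0
 * (registers) and BAR2 (doorbells), programs the DMA masks and fills
 * in the net_device features and MDIO accessors.
 */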
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
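
/* Sanity-check the blob returned by request_firmware(): every section
 * must lie within the file, every init_ops offset must index a valid
 * opcode, and the embedded version must match what the driver expects.
 */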
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
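
/* Byte-swap helper: copy n bytes of big-endian firmware data into a
 * host-order u32 array (n is expected to be a multiple of 4).
 */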
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
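/* Each 8-byte record is split into a 1-byte opcode, a 3-byte offset
 * and a 4-byte data word, all converted to host endianness.
 */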
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
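
/* Allocate bp->arr and fill it from the firmware section described by
 * fw_hdr->arr, converting with func; jumps to lbl on allocation
 * failure. Expects bp and fw_hdr to be in scope at the expansion site.
 */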
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
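
/* Request the chip-specific firmware file, validate it, and set up the
 * driver's pointers into the init data, opcode and STORM sections.
 */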
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
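
/* PCI probe entry point: allocates the net_device, performs the
 * one-time HW/SW setup, loads the firmware tables and registers the
 * netdev with the stack.
 */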
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
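
/* PCI remove: unregisters the netdev and releases the firmware
 * buffers, mappings and PCI resources in reverse order of
 * bnx2x_init_one().
 */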
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
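
/* Stripped-down unload used on the EEH error path: the device may
 * already be inaccessible, so only software resources are released
 * and no slow-path commands are sent to the chip.
 */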
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
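
/* Re-read the shared-memory (shmem) base and MCP state after a PCI
 * bus reset so the driver can talk to the management firmware again.
 */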
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
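/* Moves queued KWQEs from the software kwq ring onto the hardware
 * slow-path queue (SPQ), keeping at most max_kwqe_pending in flight.
 */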
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
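
/* Entry point for CNIC to submit 16-byte KWQEs: copies as many as fit
 * into the kwq ring and returns the number actually accepted.
 */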
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
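
/* Two delivery paths for control events into CNIC: the plain variant
 * takes cnic_mutex and may sleep, while the _bh variant uses RCU and
 * is safe to call from bottom-half context.
 */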
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
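
/* Dispatch control requests from CNIC back into bnx2x: context-table
 * writes, completion accounting and L2 client start/stop.
 */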
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
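
/* Called when CNIC attaches to this device: allocates the kwq ring,
 * programs the iSCSI MAC and publishes the ops pointer via RCU.
 */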
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
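
/* Probe hook exported to the CNIC module: fills the cnic_eth_dev
 * descriptor with this device's resources and the callbacks above.
 */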
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */