/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

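/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * tunnel GRC register accesses through PCI config space by programming
 * a window register with the target address, moving data through
 * PCICFG_GRC_DATA, and then parking the window back at
 * PCICFG_VENDOR_ID_OFFSET so stray config reads stay harmless.  A
 * read-modify-write built on them could look roughly like this:
 */
#if 0	/* example only */
static void bnx2x_reg_ind_set_bits(struct bnx2x *bp, u32 addr, u32 bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);	/* read via config window */

	bnx2x_reg_wr_ind(bp, addr, val | bits);	/* write back via window */
}
#endif
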
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

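/*
 * Illustrative sketch (not part of the driver): bnx2x_write_dmae() takes
 * a DMA-coherent host buffer and a dword-aligned GRC address, and blocks
 * (under bp->dmae_mutex) until the completion value shows up in the
 * slowpath write-back area.  Copying, say, four dwords of staged data
 * from the slowpath buffer into device memory could look like this:
 */
#if 0	/* example only */
static void bnx2x_write_sp_dwords(struct bnx2x *bp, u32 grc_addr)
{
	u32 *data = bnx2x_sp(bp, wb_data[0]);

	data[0] = 0x12345678;	/* payload staged in coherent memory */
	data[1] = data[2] = data[3] = 0;
	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_addr, 4);
}
#endif
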
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

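/*
 * Illustrative worked example (not from the driver): the loop above
 * splits a long transfer into DMAE_LEN32_WR_MAX-dword chunks; note that
 * 'len' is counted in dwords while 'offset' advances in bytes, hence the
 * "* 4".  Assuming DMAE_LEN32_WR_MAX of, say, 0x400 dwords, a
 * 0x500-dword write is issued as one 0x400-dword chunk at offset 0
 * followed by one 0x100-dword chunk at byte offset 0x1000.
 */
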
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

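/*
 * Illustrative sketch (not part of the driver): the quiesce pattern
 * above is (1) raise bp->intr_sem so every ISR bails out early,
 * (2) optionally mask interrupts at the HC, (3) synchronize_irq() so
 * in-flight handlers finish, (4) flush the slowpath workqueue.  One
 * plausible (hypothetical) pairing for a caller that reconfigures the
 * device and then resumes could look like this:
 */
#if 0	/* example only */
static void bnx2x_quiesce_and_resume(struct bnx2x *bp)
{
	bnx2x_int_disable_sync(bp, 1);	/* also mask at the HC */
	/* ... touch hardware with no ISR/sp_task activity ... */
	atomic_dec(&bp->intr_sem);	/* let ISRs run again */
	bnx2x_int_enable(bp);		/* unmask at the HC */
}
#endif
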
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

786
787static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
788{
789 struct host_status_block *fpsb = fp->status_blk;
790 u16 rc = 0;
791
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
795 rc |= 1;
796 }
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799 rc |= 2;
800 }
801 return rc;
802}
803
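/*
 * Illustrative sketch (not part of the driver): the return value is a
 * bitmask -- bit 0 set means the CSTORM (Tx) index moved, bit 1 the
 * USTORM (Rx) index -- so a hypothetical poller could dispatch on it:
 */
#if 0	/* example only */
static void bnx2x_poll_sb_example(struct bnx2x_fastpath *fp)
{
	u16 changed = bnx2x_update_fpsb_idx(fp);

	if (changed & 1)	/* CSTORM (Tx) index moved */
		bnx2x_tx_int(fp);
	if (changed & 2)	/* USTORM (Rx) index moved */
		napi_schedule(&bnx2x_fp(fp->bp, fp->index, napi));
}
#endif
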
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

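/*
 * Illustrative worked example (not from the driver): a TSO packet queued
 * as start BD + parse BD + split-header BD + 3 fragment BDs carries
 * nbd == 6 in its start BD.  The function above first unmaps the start
 * BD, then skips the parse BD and the split-header BD (neither holds a
 * DMA mapping), unmaps the 3 fragment BDs, and returns
 * first_bd + 5 -- the index of the last BD of the packet.
 */
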
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

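/*
 * Illustrative worked example (not from the driver): with tx_ring_size
 * 4096 and the NUM_TX_RINGS "next-page" BDs always counted as in use,
 * prod = 3000 and cons = 1000 gives used = 2000 + NUM_TX_RINGS, so
 * avail = 4096 - 2000 - NUM_TX_RINGS.  SUB_S16() does the subtraction
 * in s16, so the free-running 16-bit counters may wrap: prod = 100,
 * cons = 64636 still yields SUB_S16(prod, cons) == 1000, not -64536.
 */
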
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

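/*
 * Illustrative note (not part of the driver): sge_mask is a bitmap with
 * one bit per SGE ring entry, grouped into 64-bit elements.  A set bit
 * means "page still owned by the driver"; bnx2x_update_sge_prod() clears
 * bits as the FW consumes pages and only advances rx_sge_prod across
 * elements whose 64 bits are all clear, refilling those elements back to
 * all-ones.  The two bits per ring page that map onto the "next page"
 * pointer are kept permanently clear so they never stall that scan.
 */
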
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

1719static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720{
1721 struct bnx2x_fastpath *fp = fp_cookie;
1722 struct bnx2x *bp = fp->bp;
a2fbb9ea 1723
da5a662a
VZ
1724 /* Return here if interrupt is disabled */
1725 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1727 return IRQ_HANDLED;
1728 }
1729
34f80b04 1730 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1731 fp->index, fp->sb_id);
0626b899 1732 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1733
1734#ifdef BNX2X_STOP_ON_ERROR
1735 if (unlikely(bp->panic))
1736 return IRQ_HANDLED;
1737#endif
ca00392c
EG
1738 /* Handle Rx or Tx according to MSI-X vector */
1739 if (fp->is_rx_queue) {
1740 prefetch(fp->rx_cons_sb);
1741 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1742
ca00392c 1743 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1744
ca00392c
EG
1745 } else {
1746 prefetch(fp->tx_cons_sb);
1747 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748
1749 bnx2x_update_fpsb_idx(fp);
1750 rmb();
1751 bnx2x_tx_int(fp);
1752
1753 /* Re-enable interrupts */
1754 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1758 }
34f80b04 1759
a2fbb9ea
ET
1760 return IRQ_HANDLED;
1761}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
					 status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
					 status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
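/*
 * Worked example (illustration only): decoding the status word from
 * bnx2x_ack_int() as the INTx handler above does.  Bit 0 requests
 * slowpath service; bit (sb_id + 1) belongs to the fastpath queue with
 * that status block, hence the per-queue mask of 0x2 << fp->sb_id.
 * E.g. status == 0x9 means slowpath work (bit 0) plus the queue whose
 * sb_id is 2 (0x2 << 2 == 0x8); any bit still set after the loop is
 * logged as an unknown interrupt.
 */
static inline int bnx2x_status_has_sb_sketch(u16 status, u8 sb_id)
{
	return !!(status & (0x2 << sb_id));	/* hypothetical helper */
}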
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
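/*
 * A minimal usage sketch (illustration only): the HW lock pair above
 * serializes access to resources shared between the two ports and the
 * management firmware.  The GPIO/SPIO accessors below follow exactly
 * this acquire/program/release shape; the helper name is hypothetical.
 */
static inline void bnx2x_hw_lock_sketch(struct bnx2x *bp)
{
	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
		return;		/* -EEXIST, -EINVAL or 5s timeout (-EAGAIN) */

	/* ... program the shared resource, e.g. MISC_REG_GPIO ... */

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}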
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
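/*
 * A minimal usage sketch (illustration only) of the pin accessors
 * above: drive a pin low, release it to high-Z and sample it.  The
 * fan-failure handling further down drives GPIO 1 and GPIO 2 in a
 * similar way; the helper name and the 1 ms hold time are hypothetical.
 */
static inline void bnx2x_gpio_pulse_sketch(struct bnx2x *bp, u8 port)
{
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
	msleep(1);
	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
	DP(NETIF_MSG_LINK, "GPIO 1 now reads %d\n",
	   bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_1, port));
}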
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to exceed t_fair*FAIR_MEM
	   (the algorithm's resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
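/*
 * Worked example (illustration only) of the arithmetic above at a 10G
 * line rate, assuming RS_PERIODIC_TIMEOUT_USEC == 100 as the SDM-tick
 * comment implies:
 *
 *	r_param      = 10000 / 8 = 1250 bytes/usec
 *	rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes,
 *		       i.e. 1.25x the traffic of one 100 usec period
 *	t_fair       = T_FAIR_COEF / 10000 = 1000 usec
 *		       (10000 usec at 1G, matching the comment above)
 */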
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn shares the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
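/*
 * A minimal sketch (not part of the driver) of the word-copy pattern
 * used by the two loops above: driver-side structures are mirrored
 * into STORM internal memory one 32-bit word at a time with REG_WR().
 * The helper name is hypothetical; the same loop shape recurs below
 * when the per-port cmng_struct_per_port is written out.
 */
static inline void bnx2x_wr_struct_sketch(struct bnx2x *bp, u32 addr,
					  const void *data, size_t size)
{
	const u32 *words = data;
	size_t i;

	for (i = 0; i < size / 4; i++)
		REG_WR(bp, addr + i * 4, words[i]);
}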
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
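/*
 * A minimal usage sketch (illustration only): callers of
 * bnx2x_fw_command() pass a DRV_MSG code and get back the firmware's
 * FW_MSG code with the sequence bits already stripped, or 0 if the MCP
 * did not answer within 2 seconds.  bnx2x_dcc_event() below uses this
 * shape with DRV_MSG_CODE_DCC_OK / DRV_MSG_CODE_DCC_FAILURE.
 */
static inline void bnx2x_fw_command_sketch(struct bnx2x *bp)
{
	u32 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);

	if (fw_resp == 0)	/* timeout - treated as a firmware bug */
		BNX2X_ERR("MCP did not ack the command\n");
}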
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_eth_mac_addr_e1h(bp, 0);

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_eth_mac_addr_e1h(bp, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->state = BNX2X_STATE_OPEN;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
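/*
 * Worked example (illustration only): the mask returned above carries
 * one bit per default-status-block section that changed -- 1 for the
 * attention bits, 2 for CSTORM, 4 for USTORM, 8 for XSTORM and 16 for
 * TSTORM.  bnx2x_sp_task() below keys off bit 0 ("status & 0x1") to
 * decide whether the attention handler must run.
 */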
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shut down the card to prevent permanent"
	       " damage. Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bp->mf_config = SHMEM_RD(bp,
					   mf_cfg.func_mf_config[func].config);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
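/*
 * Worked example (illustration only) of the edge detection above, for
 * a single attention line.  A line is newly asserted when the chip
 * raises attn_bits while neither attn_ack nor the driver's attn_state
 * has seen it, and newly deasserted when the chip drops attn_bits
 * while both still remember it:
 *
 *	attn_bits  attn_ack  attn_state  ->  asserted  deasserted
 *	    1          0          0              1          0
 *	    0          1          1              0          1
 *	    1          1          1              0          0   (steady)
 *	    0          0          0              0          0   (idle)
 *
 * The "BAD attention state" check fires when attn_bits and attn_ack
 * agree with each other but disagree with attn_state.
 */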
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
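/*
 * Worked example (illustration only): the statistics blocks hold
 * 64-bit counters as {hi, lo} pairs of u32, so ADD_64/DIFF_64 handle
 * the carry and borrow by hand.  Adding 1 to {hi = 0, lo = UINT_MAX}:
 *
 *	s_lo += 1        -> s_lo wraps to 0
 *	(s_lo < a_lo)    -> 0 < 1, so the carry term is 1
 *	s_hi += 0 + 1    -> {hi = 1, lo = 0}, i.e. 2^32
 *
 * DIFF_64 borrows the same way and clamps the result to {0, 0} when
 * the subtrahend is larger, since a counter delta cannot go negative.
 */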
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
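/*
 * Expansion example (illustration only): for hypothetical fields s and
 * t, UPDATE_STAT64(s, t) computes the delta between the freshly DMAed
 * MAC value new->s_{hi,lo} and the last snapshot in mac_stx[0].t, then
 * stores the new raw value into mac_stx[0] and accumulates the delta
 * into the running total in mac_stx[1].  mac_stx[1] therefore survives
 * MAC counter resets, while mac_stx[0] always mirrors the hardware.
 */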
3403
3404/*
3405 * General service functions
3406 */
3407
3408static inline long bnx2x_hilo(u32 *hiref)
3409{
3410 u32 lo = *(hiref + 1);
3411#if (BITS_PER_LONG == 64)
3412 u32 hi = *hiref;
3413
3414 return HILO_U64(hi, lo);
3415#else
3416 return lo;
3417#endif
3418}
3419
3420/*
3421 * Init service functions
3422 */
3423
bb2a0f7a
YG
3424static void bnx2x_storm_stats_post(struct bnx2x *bp)
3425{
3426 if (!bp->stats_pending) {
3427 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3428 int i, rc;
bb2a0f7a
YG
3429
3430 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3431 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3432 for_each_queue(bp, i)
3433 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3434
3435 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3436 ((u32 *)&ramrod_data)[1],
3437 ((u32 *)&ramrod_data)[0], 0);
3438 if (rc == 0) {
3439 /* stats ramrod has it's own slot on the spq */
3440 bp->spq_left++;
3441 bp->stats_pending = 1;
3442 }
3443 }
3444}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
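
/*
 * The "loader" command above copies the first pre-built dmae_command
 * from the slowpath dmae[] array into the DMAE command memory and, via
 * its completion address (the GO register of channel loader_idx + 1),
 * kicks that channel to execute it.  The commands assembled by the
 * *_stats_init functions chain the same way through dmae_reg_go_c[],
 * and the final one writes DMAE_COMP_VAL into the stats_comp word that
 * bnx2x_stats_comp() polls.
 */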

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
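
/*
 * Note that bnx2x_stats_comp() returns 1 even when it gives up after
 * ten 1ms sleeps: callers use it purely as a synchronization point and
 * only log the timeout rather than propagate it as an error.
 */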

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
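
/*
 * The PMF update above is split into two DMAE commands because a single
 * GRC-to-PCI read is capped at DMAE_LEN32_RD_MAX dwords: the first
 * command fetches that maximum and chains to the second, which fetches
 * the rest of host_port_stats (note the matching DMAE_LEN32_RD_MAX * 4
 * byte offset applied to both source and destination).
 */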

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
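
/*
 * Taken together, bnx2x_port_stats_init() builds one DMAE program per
 * statistics cycle: push the host port/function stats out to the MCP
 * scratchpad, pull the active MAC's counters (BMAC or EMAC block,
 * whichever the link is using) and the NIG counters back into host
 * memory, and end with a command whose completion value releases the
 * stats_comp semaphore.
 */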

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
		      tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
		      tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
		      tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
		      tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
		      tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
		      tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
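
/*
 * The "(u16)(counter + 1) != bp->stats_counter" validity checks above
 * are deliberately done in 16-bit arithmetic so they stay correct
 * across wraparound: with an echoed counter of 0xffff and
 * bp->stats_counter == 0, (u16)(0xffff + 1) == 0 still matches.
 */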

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
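
/*
 * Statistics state machine: bnx2x_stats_stm[state][event] below gives,
 * for the current state (DISABLED/ENABLED) and the incoming event
 * (PMF/LINK_UP/UPDATE/STOP), the handler to run and the state to move
 * to.  For example, STATS_EVENT_UPDATE in STATS_STATE_ENABLED runs
 * bnx2x_stats_update() and stays in ENABLED, while the same event in
 * DISABLED falls through to bnx2x_stats_do_nothing().
 */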

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
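
/*
 * Both the USTORM and CSTORM halves of the status block are programmed
 * through BAR_CSTRORM_INTMEM: on this firmware the U-section lives in
 * CSTORM memory as well (hence the quoted "CSTORM" comment in
 * bnx2x_zero_sb() above).  Every host-coalescing index starts out
 * written as disabled (1); bnx2x_update_coalesce() later re-enables
 * the ones that have a non-zero timeout.
 */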

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
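
/*
 * The rx_ticks/tx_ticks values (set through ethtool coalescing) are in
 * microseconds; dividing by 12 presumably converts them into the host
 * coalescing timer's 12us granularity (an assumption based on the
 * constant, not documented here).  A setting below 12us yields a
 * timeout of 0, and the matching HC_DISABLE write of 1 then turns
 * coalescing off for that index entirely.
 */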

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4959
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* Mark queue as Rx */
		fp->is_rx_queue = 1;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

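/* Chain the Tx BD pages together through their last ("next BD") entries
 * and reset the doorbell data and producer/consumer indices of every Tx
 * queue.
 */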
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}

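/* Initialize the slowpath (SP) queue and tell the XSTORM where the SPQ
 * page and its current producer index live.
 */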
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

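/* Fill in the per-connection ETH context of every queue: the USTORM
 * part gets the Rx BD/SGE page addresses and buffer sizes, the XSTORM
 * part the Tx BD page addresses, plus the CDU validation words.
 */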
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

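/* Program the RSS indirection table; each entry maps a hash result to
 * one of the Rx clients in round-robin fashion.
 */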
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

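/* Write the per-client TSTORM configuration (MTU, statistics and VLAN
 * stripping flags) for every queue.
 */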
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

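/* Translate bp->rx_mode into the TSTORM MAC filter masks and the NIG
 * LLH drop mask, then push the result to the chip.
 */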
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

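/* Per-function internal memory init: RSS/TPA configuration, initial rx
 * mode, per-client statistics reset, CQ page mapping and aggregation
 * size, dropless flow control thresholds (E1H) and the rate shaping/
 * fairness (cmng) contexts.
 */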
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

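/* Note the deliberate fall-through: a COMMON load also performs the
 * PORT and FUNCTION stages, and a PORT load also performs the FUNCTION
 * stage.
 */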
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

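/* Top-level NIC init: assign client/SB ids and init the status block of
 * every fastpath, then the default SB, all rings and contexts, and
 * finally enable interrupts.
 */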
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

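/* Inflate a gzip'ed firmware blob into bp->gunzip_buf.  The 10-byte
 * gzip header (plus the optional original-file-name field flagged by
 * FNAME) is skipped manually because zlib_inflateInit2() is called with
 * a negative window size, i.e. in raw deflate mode.
 */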
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

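/* Unmask the attention interrupts of the various HW blocks; writing 0
 * to an INT_MASK register evidently leaves no source masked, while the
 * PBF write keeps bits 3 and 4 masked.  The commented-out SEM/MISC
 * masks are deliberately left at their reset values.
 */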
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

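/* Check whether this board requires fan failure detection (either
 * stated explicitly in the shared HW config or implied by the PHY type)
 * and, if so, route SPIO5 as an active-low attention interrupt to the
 * IGU.
 */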
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a valid bit (1)
   added at bit 53;
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

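/* Per-function HW init: enable MSI reconfigure, program this function's
 * ILT window (and, with CNIC, the timers/QM/searcher lines and the T2
 * table location), and init the per-function HC.
 */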
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

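	/* The missing breaks in the switch below are intentional: the MCP
	 * load codes form a hierarchy, so COMMON init falls through to
	 * PORT init, which falls through to FUNCTION init.
	 */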
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

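	/* Both helpers NULL the pointer (and zero the mapping) after
	 * freeing, so bnx2x_free_mem() stays safe to call on a partially
	 * allocated bp - which is exactly how the alloc_mem_err path in
	 * bnx2x_alloc_mem() uses it.
	 */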
	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

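	/* Each 64-byte T2 element ends with an 8-byte pointer (at offset
	 * 56) holding the physical address of the next element, so the
	 * loop below chains the whole table into a free list for the
	 * searcher.
	 */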
	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

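/* MSI-X vector layout, as built below: entry 0 carries the slowpath
 * interrupt, entry 1 is taken by CNIC when BCM_CNIC is defined, and the
 * remaining entries map one-to-one onto the fastpath queues.
 */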
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

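/* bp->intr_sem acts as an interrupt-disable count: bnx2x_netif_start()
 * decrements it and only re-enables NAPI, interrupts and the Tx queues
 * once the count actually reaches zero (atomic_dec_and_test).
 */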
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
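	/* The 16-bit CAM fields hold the MAC big-endian-style; on a
	 * little-endian host the swab16() calls below turn e.g.
	 * 00:11:22:33:44:55 into msb 0x0011, middle 0x2233, lsb 0x4455.
	 */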
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

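/* The helpers below bump bp->set_mac_pending before posting a SET_MAC
 * ramrod and then wait on that counter via bnx2x_wait_ramrod(); it is
 * the slowpath event handler that brings it back to zero once the
 * ramrod completes.
 */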
static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  " defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues, bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if MSI-X could not be enabled due to
		   lack of memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

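		/* Without an MCP the driver arbitrates load order itself:
		 * load_count[0] counts functions on the chip and
		 * load_count[1 + port] counts functions on this port, so
		 * the first loader runs COMMON init and the first loader
		 * on a port runs PORT init.
		 */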
		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should only be re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers
	 * scan to complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty, %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

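/* The PXP2 "pretend" registers below let one PCI function temporarily
 * issue GRC accesses as if it were another function; the UNDI unload
 * path uses this to disable interrupts in a "like-E1" fashion on E1H.
 */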
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

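	/* chip_id layout example (made-up register values): with chip
	 * num 0x164e, rev 0x0, metal 0x00 and bond_id 0x0 the composed
	 * id is 0x164e0000 - num in bits 16-31, rev in 12-15, metal in
	 * 4-11 and bond_id in 0-3, matching the comment above.
	 */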
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
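
/*
 * Worked example (annotation, not part of the original driver source):
 * the speed_cap_mask filtering above gives NVRAM the final say on link
 * modes.  If an external BCM8073 set SUPPORTED_10000baseT_Full earlier
 * but the mask has PORT_HW_CFG_SPEED_CAPABILITY_D0_10G cleared, the 10G
 * bit is masked out again here and is never advertised via ethtool.
 */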

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
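
/*
 * Worked example (annotation, not part of the original source):
 * bnx2x_set_mac_buf() lays the address out in network byte order.
 * For mac_hi = 0x0011 and mac_lo = 0x22334455, cpu_to_be16()/
 * cpu_to_be32() plus the two memcpy() calls yield the buffer
 * 00:11:22:33:44:55 -- canonical MAC ordering on any host endianness.
 */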

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
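
/*
 * Worked example (annotation, not part of the original source): in
 * multi-function mode the per-function MAC is split across two SHMEM
 * words -- mac_upper carries the top two bytes in its low 16 bits and
 * mac_lower the remaining four.  For mac_upper = 0x00000011 and
 * mac_lower = 0x22334455 the unpacking above yields dev_addr
 * 00:11:22:33:44:55.
 */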

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
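
/*
 * Annotation (not in the original source): the periodic timer fires
 * every HZ jiffies (1 s) on real silicon and every 5*HZ (5 s) on slow
 * emulation/FPGA platforms; a non-zero "poll" module parameter appears
 * to override both and is used directly as the interval in jiffies.
 */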

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
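
/*
 * Worked example (annotation, not part of the original source): in
 * multi-function mode the reported speed is clamped to the per-VN
 * bandwidth limit.  FUNC_MF_CFG_MAX_BW is kept in units of 100 Mbps,
 * so a field value of 25 yields vn_max_rate = 2500 and a 10G link is
 * reported to ethtool as 2500 Mbps.
 */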

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
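
/*
 * Annotation (not in the original source): the loops above count 32-bit
 * registers, which is why regdump_len is multiplied by 4 to convert to
 * bytes before the dump_hdr size is added.  Wide-bus entries contribute
 * size * (1 + read_regs_count) words each -- apparently one slot per
 * wide-bus word plus its associated read registers.
 */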

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order, but ethtool
			 * sees it as an array of bytes; converting to
			 * big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
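
/*
 * Worked example (annotation, not part of the original source): a
 * 12-byte read at an aligned offset is issued as three dword commands
 * with cmd_flags FIRST, 0 and LAST -- the while loop handles the first
 * two, and the trailing if sends the final dword with
 * MCPR_NVM_COMMAND_LAST set so the NVRAM interface can close the burst.
 */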

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
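
/*
 * Worked example (annotation, not part of the original source):
 * BYTE_OFFSET() converts a byte address into a bit shift inside its
 * aligned dword, e.g. BYTE_OFFSET(0x1007) = 8 * (0x1007 & 0x03)
 * = 8 * 3 = 24.  bnx2x_nvram_write1() below uses it to splice a single
 * byte into the big-endian dword read back from flash.
 */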
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
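
/*
 * Annotation (not in the original source): the flag juggling in the
 * write loop above re-opens a burst at every NVRAM page boundary --
 * MCPR_NVM_COMMAND_LAST is set on the final dword of the buffer or of a
 * page, and MCPR_NVM_COMMAND_FIRST on the first dword of the next page,
 * so one large ethtool write is split into per-page command sequences.
 */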

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
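
/*
 * Worked example (annotation, not part of the original source):
 * BNX2X_MAX_COALES_TOUT = 0xf0 * 12 = 240 * 12 = 2880, so both
 * rx_coalesce_usecs and tx_coalesce_usecs are silently capped at
 * 2880 us rather than rejected with an error.
 */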

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

f3c87cdd
YG
10091static int bnx2x_test_registers(struct bnx2x *bp)
10092{
10093 int idx, i, rc = -ENODEV;
10094 u32 wr_val = 0;
9dabc424 10095 int port = BP_PORT(bp);
f3c87cdd
YG
10096 static const struct {
10097 u32 offset0;
10098 u32 offset1;
10099 u32 mask;
10100 } reg_tbl[] = {
10101/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10102 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10103 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10104 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10105 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10106 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10107 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10108 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10109 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10110 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10111/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10112 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10113 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10114 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10115 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10116 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10117 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10118 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10119 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
10120 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10121/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
10122 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10123 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10124 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10125 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10126 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10127 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10128 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10129 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
10130 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10131/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
10132 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10133 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10134 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10135 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10136 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10137 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10138
10139 { 0xffffffff, 0, 0x00000000 }
10140 };
10141
10142 if (!netif_running(bp->dev))
10143 return rc;
10144
10145 /* Repeat the test twice:
10146 First by writing 0x00000000, second by writing 0xffffffff */
10147 for (idx = 0; idx < 2; idx++) {
10148
10149 switch (idx) {
10150 case 0:
10151 wr_val = 0;
10152 break;
10153 case 1:
10154 wr_val = 0xffffffff;
10155 break;
10156 }
10157
10158 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10159 u32 offset, mask, save_val, val;
f3c87cdd
YG
10160
10161 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10162 mask = reg_tbl[i].mask;
10163
10164 save_val = REG_RD(bp, offset);
10165
10166 REG_WR(bp, offset, wr_val);
10167 val = REG_RD(bp, offset);
10168
10169 /* Restore the original register's value */
10170 REG_WR(bp, offset, save_val);
10171
10172			/* verify that the value is as expected */
10173 if ((val & mask) != (wr_val & mask))
10174 goto test_reg_exit;
10175 }
10176 }
10177
10178 rc = 0;
10179
10180test_reg_exit:
10181 return rc;
10182}
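
The table-driven test above boils down to a save/write/read-back/restore cycle per register, comparing only the implemented bits named by each entry's mask. A minimal user-space sketch of the same pattern, with reg_rd()/reg_wr() as hypothetical stand-ins for REG_RD/REG_WR over a fake register file:

#include <stdint.h>
#include <stdio.h>

/* stand-in register accessors over a fake register file */
static uint32_t regs[0x100];
static uint32_t reg_rd(uint32_t off) { return regs[off / 4]; }
static void reg_wr(uint32_t off, uint32_t val) { regs[off / 4] = val; }

struct reg_test { uint32_t offset0, stride, mask; };

/* returns 0 on success, -1 on a mismatch in the implemented bits */
static int test_one_reg(const struct reg_test *t, int port, uint32_t wr_val)
{
	uint32_t off = t->offset0 + port * t->stride;
	uint32_t saved = reg_rd(off);
	uint32_t val;

	reg_wr(off, wr_val);
	val = reg_rd(off);
	reg_wr(off, saved);	/* always restore the original value */

	/* only the implemented bits are expected to stick */
	return ((val & t->mask) == (wr_val & t->mask)) ? 0 : -1;
}

int main(void)
{
	struct reg_test t = { 0x10, 4, 0x000003ff };
	uint32_t patterns[] = { 0x00000000, 0xffffffff };

	for (unsigned i = 0; i < 2; i++)
		if (test_one_reg(&t, 0, patterns[i])) {
			printf("mismatch\n");
			return 1;
		}
	printf("ok\n");
	return 0;
}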
10183
10184static int bnx2x_test_memory(struct bnx2x *bp)
10185{
10186 int i, j, rc = -ENODEV;
10187 u32 val;
10188 static const struct {
10189 u32 offset;
10190 int size;
10191 } mem_tbl[] = {
10192 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10193 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10194 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10195 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10196 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10197 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10198 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10199
10200 { 0xffffffff, 0 }
10201 };
10202 static const struct {
10203 char *name;
10204 u32 offset;
9dabc424
YG
10205 u32 e1_mask;
10206 u32 e1h_mask;
f3c87cdd 10207 } prty_tbl[] = {
9dabc424
YG
10208 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10209 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10210 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10211 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10212 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10213 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10214
10215 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10216 };
10217
10218 if (!netif_running(bp->dev))
10219 return rc;
10220
10221 /* Go through all the memories */
10222 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10223 for (j = 0; j < mem_tbl[i].size; j++)
10224 REG_RD(bp, mem_tbl[i].offset + j*4);
10225
10226 /* Check the parity status */
10227 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10228 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10229 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10230 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10231 DP(NETIF_MSG_HW,
10232 "%s is 0x%x\n", prty_tbl[i].name, val);
10233 goto test_mem_exit;
10234 }
10235 }
10236
10237 rc = 0;
10238
10239test_mem_exit:
10240 return rc;
10241}
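
The parity pass tolerates a per-chip set of known-benign status bits (e1_mask vs e1h_mask) and fails on anything outside that set. A hedged sketch of just the masking logic, using illustrative values rather than real hardware state:

#include <stdint.h>
#include <stdio.h>

struct prty { const char *name; uint32_t status, e1_mask, e1h_mask; };

/* returns 0 if no unexpected parity bits are set */
static int check_parity(const struct prty *p, int chip_is_e1)
{
	uint32_t benign = chip_is_e1 ? p->e1_mask : p->e1h_mask;

	if (p->status & ~benign) {
		printf("%s is 0x%x\n", p->name, p->status);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* 0x40 falls inside the benign E1 mask but outside the E1H one */
	struct prty p = { "CCM_PRTY_STS", 0x40, 0x3ffc0, 0 };

	printf("E1:  %d\n", check_parity(&p, 1));	/* 0  -> pass */
	printf("E1H: %d\n", check_parity(&p, 0));	/* -1 -> fail */
	return 0;
}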
10242
f3c87cdd
YG
10243static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10244{
10245 int cnt = 1000;
10246
10247 if (link_up)
10248 while (bnx2x_link_test(bp) && cnt--)
10249 msleep(10);
10250}
10251
10252static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10253{
10254 unsigned int pkt_size, num_pkts, i;
10255 struct sk_buff *skb;
10256 unsigned char *packet;
ca00392c
EG
10257 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10258 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
10259 u16 tx_start_idx, tx_idx;
10260 u16 rx_start_idx, rx_idx;
ca00392c 10261 u16 pkt_prod, bd_prod;
f3c87cdd 10262 struct sw_tx_bd *tx_buf;
ca00392c
EG
10263 struct eth_tx_start_bd *tx_start_bd;
10264 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10265 dma_addr_t mapping;
10266 union eth_rx_cqe *cqe;
10267 u8 cqe_fp_flags;
10268 struct sw_rx_bd *rx_buf;
10269 u16 len;
10270 int rc = -ENODEV;
10271
b5bf9068
EG
10272 /* check the loopback mode */
10273 switch (loopback_mode) {
10274 case BNX2X_PHY_LOOPBACK:
10275 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10276 return -EINVAL;
10277 break;
10278 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10279 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10280 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10281 break;
10282 default:
f3c87cdd 10283 return -EINVAL;
b5bf9068 10284 }
f3c87cdd 10285
b5bf9068
EG
10286 /* prepare the loopback packet */
10287 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10288 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10289 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10290 if (!skb) {
10291 rc = -ENOMEM;
10292 goto test_loopback_exit;
10293 }
10294 packet = skb_put(skb, pkt_size);
10295 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10296 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10297 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10298 for (i = ETH_HLEN; i < pkt_size; i++)
10299 packet[i] = (unsigned char) (i & 0xff);
10300
b5bf9068 10301 /* send the loopback packet */
f3c87cdd 10302 num_pkts = 0;
ca00392c
EG
10303 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10304 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10305
ca00392c
EG
10306 pkt_prod = fp_tx->tx_pkt_prod++;
10307 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10308 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10309 tx_buf->skb = skb;
ca00392c 10310 tx_buf->flags = 0;
f3c87cdd 10311
ca00392c
EG
10312 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10313 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10314 mapping = pci_map_single(bp->pdev, skb->data,
10315 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10316 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10317 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10318 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10319 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10320 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10321 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10322 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10323 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10324
10325 /* turn on parsing and get a BD */
10326 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10327 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10328
10329 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10330
58f4c4cf
EG
10331 wmb();
10332
ca00392c
EG
10333 fp_tx->tx_db.data.prod += 2;
10334 barrier();
10335 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10336
10337 mmiowb();
10338
10339 num_pkts++;
ca00392c 10340 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10341 bp->dev->trans_start = jiffies;
10342
10343 udelay(100);
10344
ca00392c 10345 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10346 if (tx_idx != tx_start_idx + num_pkts)
10347 goto test_loopback_exit;
10348
ca00392c 10349 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10350 if (rx_idx != rx_start_idx + num_pkts)
10351 goto test_loopback_exit;
10352
ca00392c 10353 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10354 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10355 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10356 goto test_loopback_rx_exit;
10357
10358 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10359 if (len != pkt_size)
10360 goto test_loopback_rx_exit;
10361
ca00392c 10362 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10363 skb = rx_buf->skb;
10364 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10365 for (i = ETH_HLEN; i < pkt_size; i++)
10366 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10367 goto test_loopback_rx_exit;
10368
10369 rc = 0;
10370
10371test_loopback_rx_exit:
f3c87cdd 10372
ca00392c
EG
10373 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10374 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10375 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10376 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10377
10378 /* Update producers */
ca00392c
EG
10379 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10380 fp_rx->rx_sge_prod);
f3c87cdd
YG
10381
10382test_loopback_exit:
10383 bp->link_params.loopback_mode = LOOPBACK_NONE;
10384
10385 return rc;
10386}
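
The loopback frame is recognizable by construction: destination MAC equal to the device address, 0x77 fill for the rest of the Ethernet header, and a deterministic byte ramp in the payload that the receive side re-derives byte by byte. A self-contained sketch of that fill-and-verify, with the header layout simplified:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

static void fill_loopback(unsigned char *pkt, size_t len,
			  const unsigned char *dev_addr)
{
	memcpy(pkt, dev_addr, ETH_ALEN);		/* dst = own MAC */
	memset(pkt + ETH_ALEN, 0, ETH_ALEN);		/* src = zeros   */
	memset(pkt + 2 * ETH_ALEN, 0x77, ETH_HLEN - 2 * ETH_ALEN);
	for (size_t i = ETH_HLEN; i < len; i++)		/* byte ramp     */
		pkt[i] = (unsigned char)(i & 0xff);
}

static int verify_loopback(const unsigned char *pkt, size_t len)
{
	for (size_t i = ETH_HLEN; i < len; i++)
		if (pkt[i] != (unsigned char)(i & 0xff))
			return -1;			/* payload corrupt */
	return 0;
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x00, 0x10, 0x18, 0, 0, 1 };
	unsigned char pkt[128];

	fill_loopback(pkt, sizeof(pkt), mac);
	printf("verify: %d\n", verify_loopback(pkt, sizeof(pkt)));
	return 0;
}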
10387
10388static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10389{
b5bf9068 10390 int rc = 0, res;
f3c87cdd
YG
10391
10392 if (!netif_running(bp->dev))
10393 return BNX2X_LOOPBACK_FAILED;
10394
f8ef6e44 10395 bnx2x_netif_stop(bp, 1);
3910c8ae 10396 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10397
b5bf9068
EG
10398 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10399 if (res) {
10400 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10401 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10402 }
10403
b5bf9068
EG
10404 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10405 if (res) {
10406 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10407 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10408 }
10409
3910c8ae 10410 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10411 bnx2x_netif_start(bp);
10412
10413 return rc;
10414}
10415
10416#define CRC32_RESIDUAL 0xdebb20e3
10417
10418static int bnx2x_test_nvram(struct bnx2x *bp)
10419{
10420 static const struct {
10421 int offset;
10422 int size;
10423 } nvram_tbl[] = {
10424 { 0, 0x14 }, /* bootstrap */
10425 { 0x14, 0xec }, /* dir */
10426 { 0x100, 0x350 }, /* manuf_info */
10427 { 0x450, 0xf0 }, /* feature_info */
10428 { 0x640, 0x64 }, /* upgrade_key_info */
10429 { 0x6a4, 0x64 },
10430 { 0x708, 0x70 }, /* manuf_key_info */
10431 { 0x778, 0x70 },
10432 { 0, 0 }
10433 };
4781bfad 10434 __be32 buf[0x350 / 4];
f3c87cdd
YG
10435 u8 *data = (u8 *)buf;
10436 int i, rc;
ab6ad5a4 10437 u32 magic, crc;
f3c87cdd
YG
10438
10439 rc = bnx2x_nvram_read(bp, 0, data, 4);
10440 if (rc) {
f5372251 10441 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10442 goto test_nvram_exit;
10443 }
10444
10445 magic = be32_to_cpu(buf[0]);
10446 if (magic != 0x669955aa) {
10447 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10448 rc = -ENODEV;
10449 goto test_nvram_exit;
10450 }
10451
10452 for (i = 0; nvram_tbl[i].size; i++) {
10453
10454 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10455 nvram_tbl[i].size);
10456 if (rc) {
10457 DP(NETIF_MSG_PROBE,
f5372251 10458 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10459 goto test_nvram_exit;
10460 }
10461
ab6ad5a4
EG
10462 crc = ether_crc_le(nvram_tbl[i].size, data);
10463 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10464 DP(NETIF_MSG_PROBE,
ab6ad5a4 10465 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10466 rc = -ENODEV;
10467 goto test_nvram_exit;
10468 }
10469 }
10470
10471test_nvram_exit:
10472 return rc;
10473}
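
Each NVRAM region carries its own CRC-32, so the test runs the CRC over the region including the stored checksum and expects the fixed residual 0xdebb20e3 instead of recomputing and comparing. A small demonstration of the residual property; crc32_le() below is a local bitwise helper rather than the kernel's ether_crc_le, and the append assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32_RESIDUAL 0xdebb20e3u

/* bitwise reflected CRC-32 (poly 0xedb88320), no table for brevity */
static uint32_t crc32_le(const uint8_t *p, size_t n)
{
	uint32_t crc = 0xffffffffu;

	while (n--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t buf[16] = "nvram region";

	/* append the region's CRC, little-endian (little-endian host) */
	uint32_t fcs = crc32_le(buf, 12);
	memcpy(buf + 12, &fcs, 4);

	/* CRC over data + stored CRC collapses to the constant residual;
	 * the ~ undoes the final xor to expose the raw register value */
	uint32_t resid = ~crc32_le(buf, 16);
	printf("0x%08x %s\n", resid,
	       resid == CRC32_RESIDUAL ? "(residual ok)" : "(bad)");
	return 0;
}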
10474
10475static int bnx2x_test_intr(struct bnx2x *bp)
10476{
10477 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10478 int i, rc;
10479
10480 if (!netif_running(bp->dev))
10481 return -ENODEV;
10482
8d9c5f34 10483 config->hdr.length = 0;
af246401
EG
10484 if (CHIP_IS_E1(bp))
10485 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10486 else
10487 config->hdr.offset = BP_FUNC(bp);
0626b899 10488 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10489 config->hdr.reserved1 = 0;
10490
e665bfda
MC
10491 bp->set_mac_pending++;
10492 smp_wmb();
f3c87cdd
YG
10493 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10494 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10495 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10496 if (rc == 0) {
f3c87cdd
YG
10497 for (i = 0; i < 10; i++) {
10498 if (!bp->set_mac_pending)
10499 break;
e665bfda 10500 smp_rmb();
f3c87cdd
YG
10501 msleep_interruptible(10);
10502 }
10503 if (i == 10)
10504 rc = -ENODEV;
10505 }
10506
10507 return rc;
10508}
10509
a2fbb9ea
ET
10510static void bnx2x_self_test(struct net_device *dev,
10511 struct ethtool_test *etest, u64 *buf)
10512{
10513 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10514
10515 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10516
f3c87cdd 10517 if (!netif_running(dev))
a2fbb9ea 10518 return;
a2fbb9ea 10519
33471629 10520 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10521 if (IS_E1HMF(bp))
10522 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10523
10524 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10525 int port = BP_PORT(bp);
10526 u32 val;
f3c87cdd
YG
10527 u8 link_up;
10528
279abdf5
EG
10529 /* save current value of input enable for TX port IF */
10530 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10531 /* disable input for TX port IF */
10532 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10533
f3c87cdd
YG
10534 link_up = bp->link_vars.link_up;
10535 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10536 bnx2x_nic_load(bp, LOAD_DIAG);
10537 /* wait until link state is restored */
10538 bnx2x_wait_for_link(bp, link_up);
10539
10540 if (bnx2x_test_registers(bp) != 0) {
10541 buf[0] = 1;
10542 etest->flags |= ETH_TEST_FL_FAILED;
10543 }
10544 if (bnx2x_test_memory(bp) != 0) {
10545 buf[1] = 1;
10546 etest->flags |= ETH_TEST_FL_FAILED;
10547 }
10548 buf[2] = bnx2x_test_loopback(bp, link_up);
10549 if (buf[2] != 0)
10550 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10551
f3c87cdd 10552 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10553
10554 /* restore input for TX port IF */
10555 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10556
f3c87cdd
YG
10557 bnx2x_nic_load(bp, LOAD_NORMAL);
10558 /* wait until link state is restored */
10559 bnx2x_wait_for_link(bp, link_up);
10560 }
10561 if (bnx2x_test_nvram(bp) != 0) {
10562 buf[3] = 1;
a2fbb9ea
ET
10563 etest->flags |= ETH_TEST_FL_FAILED;
10564 }
f3c87cdd
YG
10565 if (bnx2x_test_intr(bp) != 0) {
10566 buf[4] = 1;
10567 etest->flags |= ETH_TEST_FL_FAILED;
10568 }
10569 if (bp->port.pmf)
10570 if (bnx2x_link_test(bp) != 0) {
10571 buf[5] = 1;
10572 etest->flags |= ETH_TEST_FL_FAILED;
10573 }
f3c87cdd
YG
10574
10575#ifdef BNX2X_EXTRA_DEBUG
10576 bnx2x_panic_dump(bp);
10577#endif
a2fbb9ea
ET
10578}
10579
de832a55
EG
10580static const struct {
10581 long offset;
10582 int size;
10583 u8 string[ETH_GSTRING_LEN];
10584} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10585/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10586 { Q_STATS_OFFSET32(error_bytes_received_hi),
10587 8, "[%d]: rx_error_bytes" },
10588 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10589 8, "[%d]: rx_ucast_packets" },
10590 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10591 8, "[%d]: rx_mcast_packets" },
10592 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10593 8, "[%d]: rx_bcast_packets" },
10594 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10595 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10596 4, "[%d]: rx_phy_ip_err_discards"},
10597 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10598 4, "[%d]: rx_skb_alloc_discard" },
10599 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10600
10601/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10602 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10603 8, "[%d]: tx_packets" }
10604};
10605
bb2a0f7a
YG
10606static const struct {
10607 long offset;
10608 int size;
10609 u32 flags;
66e855f3
YG
10610#define STATS_FLAGS_PORT 1
10611#define STATS_FLAGS_FUNC 2
de832a55 10612#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10613 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10614} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10615/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10616 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10617 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10618 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10619 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10620 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10621 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10622 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10623 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10624 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10625 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10626 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10627 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10628 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10629 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10630 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10631 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10632 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10633/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10634 8, STATS_FLAGS_PORT, "rx_fragments" },
10635 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10636 8, STATS_FLAGS_PORT, "rx_jabbers" },
10637 { STATS_OFFSET32(no_buff_discard_hi),
10638 8, STATS_FLAGS_BOTH, "rx_discards" },
10639 { STATS_OFFSET32(mac_filter_discard),
10640 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10641 { STATS_OFFSET32(xxoverflow_discard),
10642 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10643 { STATS_OFFSET32(brb_drop_hi),
10644 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10645 { STATS_OFFSET32(brb_truncate_hi),
10646 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10647 { STATS_OFFSET32(pause_frames_received_hi),
10648 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10649 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10650 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10651 { STATS_OFFSET32(nig_timer_max),
10652 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10653/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10654 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10655 { STATS_OFFSET32(rx_skb_alloc_failed),
10656 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10657 { STATS_OFFSET32(hw_csum_err),
10658 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10659
10660 { STATS_OFFSET32(total_bytes_transmitted_hi),
10661 8, STATS_FLAGS_BOTH, "tx_bytes" },
10662 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10663 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10664 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10665 8, STATS_FLAGS_BOTH, "tx_packets" },
10666 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10667 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10668 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10669 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10670 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10671 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10672 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10673 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10674/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10675 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10676 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10677 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10678 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10679 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10680 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10681 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10682 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10683 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10684 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10685 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10686 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10687 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10688 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10689 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10690 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10691 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10692 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10693 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10694/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10695 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10696 { STATS_OFFSET32(pause_frames_sent_hi),
10697 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10698};
10699
de832a55
EG
10700#define IS_PORT_STAT(i) \
10701 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10702#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10703#define IS_E1HMF_MODE_STAT(bp) \
10704 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10705
15f0a394
BH
10706static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10707{
10708 struct bnx2x *bp = netdev_priv(dev);
10709 int i, num_stats;
10710
10711	switch (stringset) {
10712 case ETH_SS_STATS:
10713 if (is_multi(bp)) {
10714 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10715 if (!IS_E1HMF_MODE_STAT(bp))
10716 num_stats += BNX2X_NUM_STATS;
10717 } else {
10718 if (IS_E1HMF_MODE_STAT(bp)) {
10719 num_stats = 0;
10720 for (i = 0; i < BNX2X_NUM_STATS; i++)
10721 if (IS_FUNC_STAT(i))
10722 num_stats++;
10723 } else
10724 num_stats = BNX2X_NUM_STATS;
10725 }
10726 return num_stats;
10727
10728 case ETH_SS_TEST:
10729 return BNX2X_NUM_TESTS;
10730
10731 default:
10732 return -EINVAL;
10733 }
10734}
10735
a2fbb9ea
ET
10736static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10737{
bb2a0f7a 10738 struct bnx2x *bp = netdev_priv(dev);
de832a55 10739 int i, j, k;
bb2a0f7a 10740
a2fbb9ea
ET
10741 switch (stringset) {
10742 case ETH_SS_STATS:
de832a55
EG
10743 if (is_multi(bp)) {
10744 k = 0;
ca00392c 10745 for_each_rx_queue(bp, i) {
de832a55
EG
10746 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10747 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10748 bnx2x_q_stats_arr[j].string, i);
10749 k += BNX2X_NUM_Q_STATS;
10750 }
10751 if (IS_E1HMF_MODE_STAT(bp))
10752 break;
10753 for (j = 0; j < BNX2X_NUM_STATS; j++)
10754 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10755 bnx2x_stats_arr[j].string);
10756 } else {
10757 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10758 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10759 continue;
10760 strcpy(buf + j*ETH_GSTRING_LEN,
10761 bnx2x_stats_arr[i].string);
10762 j++;
10763 }
bb2a0f7a 10764 }
a2fbb9ea
ET
10765 break;
10766
10767 case ETH_SS_TEST:
10768 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10769 break;
10770 }
10771}
10772
a2fbb9ea
ET
10773static void bnx2x_get_ethtool_stats(struct net_device *dev,
10774 struct ethtool_stats *stats, u64 *buf)
10775{
10776 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10777 u32 *hw_stats, *offset;
10778 int i, j, k;
bb2a0f7a 10779
de832a55
EG
10780 if (is_multi(bp)) {
10781 k = 0;
ca00392c 10782 for_each_rx_queue(bp, i) {
de832a55
EG
10783 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10784 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10785 if (bnx2x_q_stats_arr[j].size == 0) {
10786 /* skip this counter */
10787 buf[k + j] = 0;
10788 continue;
10789 }
10790 offset = (hw_stats +
10791 bnx2x_q_stats_arr[j].offset);
10792 if (bnx2x_q_stats_arr[j].size == 4) {
10793 /* 4-byte counter */
10794 buf[k + j] = (u64) *offset;
10795 continue;
10796 }
10797 /* 8-byte counter */
10798 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10799 }
10800 k += BNX2X_NUM_Q_STATS;
10801 }
10802 if (IS_E1HMF_MODE_STAT(bp))
10803 return;
10804 hw_stats = (u32 *)&bp->eth_stats;
10805 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10806 if (bnx2x_stats_arr[j].size == 0) {
10807 /* skip this counter */
10808 buf[k + j] = 0;
10809 continue;
10810 }
10811 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10812 if (bnx2x_stats_arr[j].size == 4) {
10813 /* 4-byte counter */
10814 buf[k + j] = (u64) *offset;
10815 continue;
10816 }
10817 /* 8-byte counter */
10818 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10819 }
de832a55
EG
10820 } else {
10821 hw_stats = (u32 *)&bp->eth_stats;
10822 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10823 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10824 continue;
10825 if (bnx2x_stats_arr[i].size == 0) {
10826 /* skip this counter */
10827 buf[j] = 0;
10828 j++;
10829 continue;
10830 }
10831 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10832 if (bnx2x_stats_arr[i].size == 4) {
10833 /* 4-byte counter */
10834 buf[j] = (u64) *offset;
10835 j++;
10836 continue;
10837 }
10838 /* 8-byte counter */
10839 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10840 j++;
a2fbb9ea 10841 }
a2fbb9ea
ET
10842 }
10843}
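
Counters live in the stats blocks as 32-bit words, with 8-byte counters stored high word first and reassembled via HILO_U64. A sketch of that walk, with hilo_u64() assumed equivalent to the driver's macro:

#include <stdint.h>
#include <stdio.h>

/* assumed equivalent of the driver's HILO_U64 macro */
static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

struct stat_desc { size_t offset; int size; const char *name; };

/* hw stats block viewed as an array of 32-bit words, _hi word first */
static void dump_stats(const uint32_t *hw, const struct stat_desc *d,
		       size_t n)
{
	for (size_t i = 0; i < n; i++) {
		const uint32_t *p = hw + d[i].offset;
		uint64_t v = (d[i].size == 4) ? *p : hilo_u64(p[0], p[1]);

		printf("%-10s %llu\n", d[i].name, (unsigned long long)v);
	}
}

int main(void)
{
	uint32_t hw[4] = { 0x1, 0x2, 42, 0 };	/* 8-byte ctr, 4-byte ctr */
	struct stat_desc d[] = {
		{ 0, 8, "rx_bytes" },		/* = (1 << 32) | 2 */
		{ 2, 4, "rx_drops" },		/* = 42 */
	};

	dump_stats(hw, d, 2);
	return 0;
}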
10844
10845static int bnx2x_phys_id(struct net_device *dev, u32 data)
10846{
10847 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10848 int port = BP_PORT(bp);
a2fbb9ea
ET
10849 int i;
10850
34f80b04
EG
10851 if (!netif_running(dev))
10852 return 0;
10853
10854 if (!bp->port.pmf)
10855 return 0;
10856
a2fbb9ea
ET
10857 if (data == 0)
10858 data = 2;
10859
10860 for (i = 0; i < (data * 2); i++) {
c18487ee 10861 if ((i % 2) == 0)
34f80b04 10862 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10863 bp->link_params.hw_led_mode,
10864 bp->link_params.chip_id);
10865 else
34f80b04 10866 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10867 bp->link_params.hw_led_mode,
10868 bp->link_params.chip_id);
10869
a2fbb9ea
ET
10870 msleep_interruptible(500);
10871 if (signal_pending(current))
10872 break;
10873 }
10874
c18487ee 10875 if (bp->link_vars.link_up)
34f80b04 10876 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10877 bp->link_vars.line_speed,
10878 bp->link_params.hw_led_mode,
10879 bp->link_params.chip_id);
a2fbb9ea
ET
10880
10881 return 0;
10882}
10883
0fc0b732 10884static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10885 .get_settings = bnx2x_get_settings,
10886 .set_settings = bnx2x_set_settings,
10887 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10888 .get_regs_len = bnx2x_get_regs_len,
10889 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10890 .get_wol = bnx2x_get_wol,
10891 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10892 .get_msglevel = bnx2x_get_msglevel,
10893 .set_msglevel = bnx2x_set_msglevel,
10894 .nway_reset = bnx2x_nway_reset,
01e53298 10895 .get_link = bnx2x_get_link,
7a9b2557
VZ
10896 .get_eeprom_len = bnx2x_get_eeprom_len,
10897 .get_eeprom = bnx2x_get_eeprom,
10898 .set_eeprom = bnx2x_set_eeprom,
10899 .get_coalesce = bnx2x_get_coalesce,
10900 .set_coalesce = bnx2x_set_coalesce,
10901 .get_ringparam = bnx2x_get_ringparam,
10902 .set_ringparam = bnx2x_set_ringparam,
10903 .get_pauseparam = bnx2x_get_pauseparam,
10904 .set_pauseparam = bnx2x_set_pauseparam,
10905 .get_rx_csum = bnx2x_get_rx_csum,
10906 .set_rx_csum = bnx2x_set_rx_csum,
10907 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10908 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10909 .set_flags = bnx2x_set_flags,
10910 .get_flags = ethtool_op_get_flags,
10911 .get_sg = ethtool_op_get_sg,
10912 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10913 .get_tso = ethtool_op_get_tso,
10914 .set_tso = bnx2x_set_tso,
7a9b2557 10915 .self_test = bnx2x_self_test,
15f0a394 10916 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10917 .get_strings = bnx2x_get_strings,
a2fbb9ea 10918 .phys_id = bnx2x_phys_id,
bb2a0f7a 10919 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10920};
10921
10922/* end of ethtool_ops */
10923
10924/****************************************************************************
10925* General service functions
10926****************************************************************************/
10927
10928static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10929{
10930 u16 pmcsr;
10931
10932 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10933
10934 switch (state) {
10935 case PCI_D0:
34f80b04 10936 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10937 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10938 PCI_PM_CTRL_PME_STATUS));
10939
10940 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10941 /* delay required during transition out of D3hot */
a2fbb9ea 10942 msleep(20);
34f80b04 10943 break;
a2fbb9ea 10944
34f80b04
EG
10945 case PCI_D3hot:
10946 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10947 pmcsr |= 3;
a2fbb9ea 10948
34f80b04
EG
10949 if (bp->wol)
10950 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10951
34f80b04
EG
10952 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10953 pmcsr);
a2fbb9ea 10954
34f80b04
EG
10955 /* No more memory access after this point until
10956 * device is brought back to D0.
10957 */
10958 break;
10959
10960 default:
10961 return -EINVAL;
10962 }
10963 return 0;
a2fbb9ea
ET
10964}
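
The transition logic reduces to rewriting the PowerState field (bits 1:0) of PMCSR, setting PME_En when wake-on-LAN is armed, and delaying after leaving D3hot. A sketch of just the bit arithmetic; the constants mirror the PCI PM spec, and no config space is touched here:

#include <stdint.h>
#include <stdio.h>

#define PM_CTRL_STATE_MASK	0x0003	/* PowerState, bits 1:0 */
#define PM_CTRL_PME_ENABLE	0x0100	/* PME_En */
#define PM_CTRL_PME_STATUS	0x8000	/* PME_Status, write-1-to-clear */

static uint16_t pmcsr_to_d0(uint16_t pmcsr)
{
	/* clear the state field (D0 == 0) and ack any pending PME */
	return (pmcsr & ~PM_CTRL_STATE_MASK) | PM_CTRL_PME_STATUS;
}

static uint16_t pmcsr_to_d3hot(uint16_t pmcsr, int wol)
{
	pmcsr = (pmcsr & ~PM_CTRL_STATE_MASK) | 3;	/* D3hot == 3 */
	if (wol)
		pmcsr |= PM_CTRL_PME_ENABLE;
	return pmcsr;
}

int main(void)
{
	uint16_t pmcsr = 0x0003;			/* currently D3hot */

	printf("to D0:    0x%04x\n", pmcsr_to_d0(pmcsr));
	printf("to D3hot: 0x%04x\n", pmcsr_to_d3hot(0, 1));
	return 0;
}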
10965
237907c1
EG
10966static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10967{
10968 u16 rx_cons_sb;
10969
10970 /* Tell compiler that status block fields can change */
10971 barrier();
10972 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10973 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10974 rx_cons_sb++;
10975 return (fp->rx_comp_cons != rx_cons_sb);
10976}
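
The rx_cons_sb++ above hops over ring slots at page boundaries: an index whose low bits equal MAX_RCQ_DESC_CNT lands on a next-page link descriptor that never carries completions. A sketch of that skip with an assumed 8-slot page whose last slot is the link:

#include <stdint.h>
#include <stdio.h>

#define MAX_RCQ_DESC_CNT 7	/* 8 slots per page, last one is a link */

/* advance a ring index, hopping over the per-page link descriptor */
static uint16_t next_usable(uint16_t idx)
{
	idx++;
	if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		idx++;		/* skip the next-page pointer slot */
	return idx;
}

int main(void)
{
	for (uint16_t i = 5; i < 10; i = next_usable(i))
		printf("%u ", i);	/* prints: 5 6 8 9 (7 is the link) */
	printf("\n");
	return 0;
}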
10977
34f80b04
EG
10978/*
10979 * net_device service functions
10980 */
10981
a2fbb9ea
ET
10982static int bnx2x_poll(struct napi_struct *napi, int budget)
10983{
10984 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10985 napi);
10986 struct bnx2x *bp = fp->bp;
10987 int work_done = 0;
10988
10989#ifdef BNX2X_STOP_ON_ERROR
10990 if (unlikely(bp->panic))
34f80b04 10991 goto poll_panic;
a2fbb9ea
ET
10992#endif
10993
a2fbb9ea
ET
10994 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10995 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10996
10997 bnx2x_update_fpsb_idx(fp);
10998
8534f32c 10999 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 11000 work_done = bnx2x_rx_int(fp, budget);
356e2385 11001
8534f32c
EG
11002 /* must not complete if we consumed full budget */
11003 if (work_done >= budget)
11004 goto poll_again;
11005 }
a2fbb9ea 11006
ca00392c 11007	/* bnx2x_has_rx_work() reads the status block, so we must make sure
8534f32c 11008	 * that the status block indices have actually been read
ca00392c 11009	 * (bnx2x_update_fpsb_idx) before this check (bnx2x_has_rx_work),
8534f32c 11010	 * so that we do not write a "newer" value of the status block to IGU.
ca00392c 11011	 * Without the rmb, if a DMA arrived right after bnx2x_has_rx_work,
8534f32c
EG
11012	 * the memory read (bnx2x_update_fpsb_idx) could be postponed to
11013	 * right before bnx2x_ack_sb. In that case there would never be
11014	 * another interrupt until the next status block update, even though
11015	 * work is still unhandled.
11016	 */
11017 rmb();
a2fbb9ea 11018
ca00392c 11019 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 11020#ifdef BNX2X_STOP_ON_ERROR
34f80b04 11021poll_panic:
a2fbb9ea 11022#endif
288379f0 11023 napi_complete(napi);
a2fbb9ea 11024
0626b899 11025 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 11026 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 11027 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
11028 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11029 }
356e2385 11030
8534f32c 11031poll_again:
a2fbb9ea
ET
11032 return work_done;
11033}
11034
755735eb
EG
11035
11036/* we split the first BD into headers and data BDs
33471629 11037 * to ease the pain of our fellow microcode engineers
755735eb
EG
11038 * we use one mapping for both BDs
11039 * So far this has only been observed to happen
11040 * in Other Operating Systems(TM)
11041 */
11042static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11043 struct bnx2x_fastpath *fp,
ca00392c
EG
11044 struct sw_tx_bd *tx_buf,
11045 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
11046 u16 bd_prod, int nbd)
11047{
ca00392c 11048 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
11049 struct eth_tx_bd *d_tx_bd;
11050 dma_addr_t mapping;
11051 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11052
11053 /* first fix first BD */
11054 h_tx_bd->nbd = cpu_to_le16(nbd);
11055 h_tx_bd->nbytes = cpu_to_le16(hlen);
11056
11057 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11058 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11059 h_tx_bd->addr_lo, h_tx_bd->nbd);
11060
11061 /* now get a new data BD
11062 * (after the pbd) and fill it */
11063 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11064 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11065
11066 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11067 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11068
11069 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11070 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11071 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11072
11073 /* this marks the BD as one that has no individual mapping */
11074 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11075
755735eb
EG
11076 DP(NETIF_MSG_TX_QUEUED,
11077 "TSO split data size is %d (%x:%x)\n",
11078 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11079
ca00392c
EG
11080 /* update tx_bd */
11081 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11082
11083 return bd_prod;
11084}
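
The split reuses the single existing DMA mapping: the first BD is shrunk to hlen bytes of headers, and the new data BD points at mapping + hlen for the remaining old_len - hlen bytes. The address arithmetic in isolation, with the BD fields reduced to a 64-bit address and a length:

#include <stdint.h>
#include <stdio.h>

struct bd { uint64_t addr; uint16_t nbytes; };

/* split one mapped buffer into a header BD and a data BD */
static void tso_split(struct bd *hdr, struct bd *data, uint16_t hlen)
{
	uint16_t old_len = hdr->nbytes;

	hdr->nbytes  = hlen;			/* first BD: headers only   */
	data->addr   = hdr->addr + hlen;	/* same mapping, offset in  */
	data->nbytes = old_len - hlen;		/* remainder of linear data */
}

int main(void)
{
	struct bd hdr = { 0x10000, 1460 }, data;

	tso_split(&hdr, &data, 54);		/* eth + ip + tcp headers */
	printf("hdr  %llx +%u\n", (unsigned long long)hdr.addr, hdr.nbytes);
	printf("data %llx +%u\n", (unsigned long long)data.addr, data.nbytes);
	return 0;
}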
11085
11086static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11087{
11088 if (fix > 0)
11089 csum = (u16) ~csum_fold(csum_sub(csum,
11090 csum_partial(t_header - fix, fix, 0)));
11091
11092 else if (fix < 0)
11093 csum = (u16) ~csum_fold(csum_add(csum,
11094 csum_partial(t_header, -fix, 0)));
11095
11096 return swab16(csum);
11097}
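
The fix above compensates for hardware that began its ones-complement sum `fix` bytes away from the transport header: the stray span is summed on its own and subtracted from (or added to) the running checksum before folding. A user-space sketch of the subtraction case, with a simplified csum_partial():

#include <stdint.h>
#include <stdio.h>

/* simplified ones-complement sum over a byte span (even length) */
static uint32_t csum_partial(const uint8_t *p, int len, uint32_t sum)
{
	for (int i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* drop the bytes before the transport header back out of the sum */
static uint16_t csum_fix(const uint8_t *t_hdr, uint32_t sum, int fix)
{
	if (fix > 0)	/* ones-complement subtract: add the complement */
		sum += 0xffff - csum_partial(t_hdr - fix, fix, 0);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum & 0xffff;
}

int main(void)
{
	uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* hw summed from pkt[0], but the checksum should start at pkt[2] */
	uint32_t hw_sum = csum_partial(pkt, 8, 0);
	uint16_t fixed  = csum_fix(pkt + 2, hw_sum, 2);
	uint16_t want   = (uint16_t)~csum_partial(pkt + 2, 6, 0) & 0xffff;

	printf("fixed 0x%04x want 0x%04x\n", fixed, want);
	return 0;
}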
11098
11099static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11100{
11101 u32 rc;
11102
11103 if (skb->ip_summed != CHECKSUM_PARTIAL)
11104 rc = XMIT_PLAIN;
11105
11106 else {
4781bfad 11107 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11108 rc = XMIT_CSUM_V6;
11109 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11110 rc |= XMIT_CSUM_TCP;
11111
11112 } else {
11113 rc = XMIT_CSUM_V4;
11114 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11115 rc |= XMIT_CSUM_TCP;
11116 }
11117 }
11118
11119 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11120 rc |= XMIT_GSO_V4;
11121
11122 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11123 rc |= XMIT_GSO_V6;
11124
11125 return rc;
11126}
11127
632da4d6 11128#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11129/* check if packet requires linearization (packet is too fragmented)
11130 no need to check fragmentation if page size > 8K (there will be no
11131 violation to FW restrictions) */
755735eb
EG
11132static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11133 u32 xmit_type)
11134{
11135 int to_copy = 0;
11136 int hlen = 0;
11137 int first_bd_sz = 0;
11138
11139 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11140 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11141
11142 if (xmit_type & XMIT_GSO) {
11143 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11144 /* Check if LSO packet needs to be copied:
11145 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11146 int wnd_size = MAX_FETCH_BD - 3;
33471629 11147 /* Number of windows to check */
755735eb
EG
11148 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11149 int wnd_idx = 0;
11150 int frag_idx = 0;
11151 u32 wnd_sum = 0;
11152
11153 /* Headers length */
11154 hlen = (int)(skb_transport_header(skb) - skb->data) +
11155 tcp_hdrlen(skb);
11156
11157 /* Amount of data (w/o headers) on linear part of SKB*/
11158 first_bd_sz = skb_headlen(skb) - hlen;
11159
11160 wnd_sum = first_bd_sz;
11161
11162 /* Calculate the first sum - it's special */
11163 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11164 wnd_sum +=
11165 skb_shinfo(skb)->frags[frag_idx].size;
11166
11167 /* If there was data on linear skb data - check it */
11168 if (first_bd_sz > 0) {
11169 if (unlikely(wnd_sum < lso_mss)) {
11170 to_copy = 1;
11171 goto exit_lbl;
11172 }
11173
11174 wnd_sum -= first_bd_sz;
11175 }
11176
11177 /* Others are easier: run through the frag list and
11178 check all windows */
11179 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11180 wnd_sum +=
11181 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11182
11183 if (unlikely(wnd_sum < lso_mss)) {
11184 to_copy = 1;
11185 break;
11186 }
11187 wnd_sum -=
11188 skb_shinfo(skb)->frags[wnd_idx].size;
11189 }
755735eb
EG
11190 } else {
11191		/* in the non-LSO case, a too fragmented packet should
11192		   always be linearized */
11193 to_copy = 1;
11194 }
11195 }
11196
11197exit_lbl:
11198 if (unlikely(to_copy))
11199 DP(NETIF_MSG_TX_QUEUED,
11200 "Linearization IS REQUIRED for %s packet. "
11201 "num_frags %d hlen %d first_bd_sz %d\n",
11202 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11203 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11204
11205 return to_copy;
11206}
632da4d6 11207#endif
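
The window check slides a span of wnd_size frags across the skb and requests skb_linearize() if any window sums to less than one MSS, since the firmware would then need more BDs than it can fetch for a single LSO segment. A compact sketch with frag sizes as a plain array:

#include <stdio.h>

/* return 1 if some window of 'wnd' consecutive frags sums below mss */
static int needs_linearize(const int *frag, int nfrags, int wnd, int mss)
{
	if (nfrags < wnd)
		return 0;			/* few frags: always fits */

	int sum = 0;
	for (int i = 0; i < wnd; i++)		/* first window */
		sum += frag[i];

	for (int i = 0; ; i++) {
		if (sum < mss)
			return 1;		/* FW window underruns MSS */
		if (i + wnd >= nfrags)
			return 0;
		sum += frag[i + wnd] - frag[i];	/* slide by one frag */
	}
}

int main(void)
{
	int frags[] = { 400, 400, 400, 100, 100, 100, 100, 2000 };

	printf("%d\n", needs_linearize(frags, 8, 4, 1460)); /* 1: too split */
	printf("%d\n", needs_linearize(frags, 8, 4, 300));  /* 0: ok */
	return 0;
}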
755735eb
EG
11208
11209/* called with netif_tx_lock
a2fbb9ea 11210 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11211 * netif_wake_queue()
a2fbb9ea 11212 */
61357325 11213static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11214{
11215 struct bnx2x *bp = netdev_priv(dev);
ca00392c 11216 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 11217 struct netdev_queue *txq;
a2fbb9ea 11218 struct sw_tx_bd *tx_buf;
ca00392c
EG
11219 struct eth_tx_start_bd *tx_start_bd;
11220 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11221 struct eth_tx_parse_bd *pbd = NULL;
11222 u16 pkt_prod, bd_prod;
755735eb 11223 int nbd, fp_index;
a2fbb9ea 11224 dma_addr_t mapping;
755735eb 11225 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11226 int i;
11227 u8 hlen = 0;
ca00392c 11228 __le16 pkt_size = 0;
a2fbb9ea
ET
11229
11230#ifdef BNX2X_STOP_ON_ERROR
11231 if (unlikely(bp->panic))
11232 return NETDEV_TX_BUSY;
11233#endif
11234
555f6c78
EG
11235 fp_index = skb_get_queue_mapping(skb);
11236 txq = netdev_get_tx_queue(dev, fp_index);
11237
ca00392c
EG
11238 fp = &bp->fp[fp_index + bp->num_rx_queues];
11239 fp_stat = &bp->fp[fp_index];
755735eb 11240
231fd58a 11241 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 11242 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 11243 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11244 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11245 return NETDEV_TX_BUSY;
11246 }
11247
755735eb
EG
11248 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11249 " gso type %x xmit_type %x\n",
11250 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11251 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11252
632da4d6 11253#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11254 /* First, check if we need to linearize the skb (due to FW
11255 restrictions). No need to check fragmentation if page size > 8K
11256 (there will be no violation to FW restrictions) */
755735eb
EG
11257 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11258 /* Statistics of linearization */
11259 bp->lin_cnt++;
11260 if (skb_linearize(skb) != 0) {
11261 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11262 "silently dropping this SKB\n");
11263 dev_kfree_skb_any(skb);
da5a662a 11264 return NETDEV_TX_OK;
755735eb
EG
11265 }
11266 }
632da4d6 11267#endif
755735eb 11268
a2fbb9ea 11269 /*
755735eb 11270 Please read carefully. First we use one BD which we mark as start,
ca00392c 11271 then we have a parsing info BD (used for TSO or xsum),
755735eb 11272 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11273 (don't forget to mark the last one as last,
11274 and to unmap only AFTER you write to the BD ...)
755735eb 11275	   And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11276 */
11277
11278 pkt_prod = fp->tx_pkt_prod++;
755735eb 11279 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11280
755735eb 11281 /* get a tx_buf and first BD */
a2fbb9ea 11282 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11283 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11284
ca00392c
EG
11285 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11286 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11287 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11288 /* header nbd */
ca00392c 11289 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11290
755735eb
EG
11291 /* remember the first BD of the packet */
11292 tx_buf->first_bd = fp->tx_bd_prod;
11293 tx_buf->skb = skb;
ca00392c 11294 tx_buf->flags = 0;
a2fbb9ea
ET
11295
11296 DP(NETIF_MSG_TX_QUEUED,
11297 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11298 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11299
0c6671b0
EG
11300#ifdef BCM_VLAN
11301 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11302 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11303 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11304 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11305 } else
0c6671b0 11306#endif
ca00392c 11307 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11308
ca00392c
EG
11309 /* turn on parsing and get a BD */
11310 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11311 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11312
ca00392c 11313 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11314
11315 if (xmit_type & XMIT_CSUM) {
ca00392c 11316 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11317
11318 /* for now NS flag is not used in Linux */
4781bfad
EG
11319 pbd->global_data =
11320 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11321 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11322
755735eb
EG
11323 pbd->ip_hlen = (skb_transport_header(skb) -
11324 skb_network_header(skb)) / 2;
11325
11326 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11327
755735eb 11328 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11329 hlen = hlen*2;
a2fbb9ea 11330
ca00392c 11331 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11332
11333 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11334 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11335 ETH_TX_BD_FLAGS_IP_CSUM;
11336 else
ca00392c
EG
11337 tx_start_bd->bd_flags.as_bitfield |=
11338 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11339
11340 if (xmit_type & XMIT_CSUM_TCP) {
11341 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11342
11343 } else {
11344 s8 fix = SKB_CS_OFF(skb); /* signed! */
11345
ca00392c 11346 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11347
755735eb 11348 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11349 "hlen %d fix %d csum before fix %x\n",
11350 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11351
11352 /* HW bug: fixup the CSUM */
11353 pbd->tcp_pseudo_csum =
11354 bnx2x_csum_fix(skb_transport_header(skb),
11355 SKB_CS(skb), fix);
11356
11357 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11358 pbd->tcp_pseudo_csum);
11359 }
a2fbb9ea
ET
11360 }
11361
11362 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11363 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11364
ca00392c
EG
11365 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11366 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11367 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11368 tx_start_bd->nbd = cpu_to_le16(nbd);
11369 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11370 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11371
11372 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11373 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11374 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11375 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11376 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11377
755735eb 11378 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11379
11380 DP(NETIF_MSG_TX_QUEUED,
11381 "TSO packet len %d hlen %d total len %d tso size %d\n",
11382 skb->len, hlen, skb_headlen(skb),
11383 skb_shinfo(skb)->gso_size);
11384
ca00392c 11385 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11386
755735eb 11387 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11388 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11389 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11390
11391 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11392 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11393 pbd->tcp_flags = pbd_tcp_flags(skb);
11394
11395 if (xmit_type & XMIT_GSO_V4) {
11396 pbd->ip_id = swab16(ip_hdr(skb)->id);
11397 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11398 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11399 ip_hdr(skb)->daddr,
11400 0, IPPROTO_TCP, 0));
755735eb
EG
11401
11402 } else
11403 pbd->tcp_pseudo_csum =
11404 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11405 &ipv6_hdr(skb)->daddr,
11406 0, IPPROTO_TCP, 0));
11407
a2fbb9ea
ET
11408 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11409 }
ca00392c 11410 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11411
755735eb
EG
11412 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11413 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11414
755735eb 11415 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11416 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11417 if (total_pkt_bd == NULL)
11418 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11419
755735eb
EG
11420 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11421 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11422
ca00392c
EG
11423 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11424 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11425 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11426 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11427
755735eb 11428 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11429 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11430 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11431 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11432 }
11433
ca00392c 11434 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11435
a2fbb9ea
ET
11436 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11437
755735eb 11438 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11439 * if the packet contains or ends with it
11440 */
11441 if (TX_BD_POFF(bd_prod) < nbd)
11442 nbd++;
11443
ca00392c
EG
11444 if (total_pkt_bd != NULL)
11445 total_pkt_bd->total_pkt_bytes = pkt_size;
11446
a2fbb9ea
ET
11447 if (pbd)
11448 DP(NETIF_MSG_TX_QUEUED,
11449 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11450 " tcp_flags %x xsum %x seq %u hlen %u\n",
11451 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11452 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11453 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11454
755735eb 11455 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11456
58f4c4cf
EG
11457 /*
11458 * Make sure that the BD data is updated before updating the producer
11459 * since FW might read the BD right after the producer is updated.
11460 * This is only applicable for weak-ordered memory model archs such
11461	 * as IA-64. The following barrier is also mandatory since FW
11462	 * assumes packets must have BDs.
11463 */
11464 wmb();
11465
ca00392c
EG
11466 fp->tx_db.data.prod += nbd;
11467 barrier();
11468 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11469
11470 mmiowb();
11471
755735eb 11472 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11473
11474 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11475 netif_tx_stop_queue(txq);
58f4c4cf
EG
11476 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11477 if we put Tx into XOFF state. */
11478 smp_mb();
ca00392c 11479 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11480 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11481 netif_tx_wake_queue(txq);
a2fbb9ea 11482 }
ca00392c 11483 fp_stat->tx_pkt++;
a2fbb9ea
ET
11484
11485 return NETDEV_TX_OK;
11486}
11487
bb2a0f7a 11488/* called with rtnl_lock */
a2fbb9ea
ET
11489static int bnx2x_open(struct net_device *dev)
11490{
11491 struct bnx2x *bp = netdev_priv(dev);
11492
6eccabb3
EG
11493 netif_carrier_off(dev);
11494
a2fbb9ea
ET
11495 bnx2x_set_power_state(bp, PCI_D0);
11496
bb2a0f7a 11497 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11498}
11499
bb2a0f7a 11500/* called with rtnl_lock */
a2fbb9ea
ET
11501static int bnx2x_close(struct net_device *dev)
11502{
a2fbb9ea
ET
11503 struct bnx2x *bp = netdev_priv(dev);
11504
11505 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11506 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11507 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11508 if (!CHIP_REV_IS_SLOW(bp))
11509 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11510
11511 return 0;
11512}
11513
f5372251 11514/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11515static void bnx2x_set_rx_mode(struct net_device *dev)
11516{
11517 struct bnx2x *bp = netdev_priv(dev);
11518 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11519 int port = BP_PORT(bp);
11520
11521 if (bp->state != BNX2X_STATE_OPEN) {
11522 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11523 return;
11524 }
11525
11526 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11527
11528 if (dev->flags & IFF_PROMISC)
11529 rx_mode = BNX2X_RX_MODE_PROMISC;
11530
11531 else if ((dev->flags & IFF_ALLMULTI) ||
11532 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11533 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11534
11535 else { /* some multicasts */
11536 if (CHIP_IS_E1(bp)) {
11537 int i, old, offset;
11538 struct dev_mc_list *mclist;
11539 struct mac_configuration_cmd *config =
11540 bnx2x_sp(bp, mcast_config);
11541
11542 for (i = 0, mclist = dev->mc_list;
11543 mclist && (i < dev->mc_count);
11544 i++, mclist = mclist->next) {
11545
11546 config->config_table[i].
11547 cam_entry.msb_mac_addr =
11548 swab16(*(u16 *)&mclist->dmi_addr[0]);
11549 config->config_table[i].
11550 cam_entry.middle_mac_addr =
11551 swab16(*(u16 *)&mclist->dmi_addr[2]);
11552 config->config_table[i].
11553 cam_entry.lsb_mac_addr =
11554 swab16(*(u16 *)&mclist->dmi_addr[4]);
11555 config->config_table[i].cam_entry.flags =
11556 cpu_to_le16(port);
11557 config->config_table[i].
11558 target_table_entry.flags = 0;
ca00392c
EG
11559 config->config_table[i].target_table_entry.
11560 clients_bit_vector =
11561 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11562 config->config_table[i].
11563 target_table_entry.vlan_id = 0;
11564
11565 DP(NETIF_MSG_IFUP,
11566 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11567 config->config_table[i].
11568 cam_entry.msb_mac_addr,
11569 config->config_table[i].
11570 cam_entry.middle_mac_addr,
11571 config->config_table[i].
11572 cam_entry.lsb_mac_addr);
11573 }
8d9c5f34 11574 old = config->hdr.length;
34f80b04
EG
11575 if (old > i) {
11576 for (; i < old; i++) {
11577 if (CAM_IS_INVALID(config->
11578 config_table[i])) {
af246401 11579 /* already invalidated */
34f80b04
EG
11580 break;
11581 }
11582 /* invalidate */
11583 CAM_INVALIDATE(config->
11584 config_table[i]);
11585 }
11586 }
11587
11588 if (CHIP_REV_IS_SLOW(bp))
11589 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11590 else
11591 offset = BNX2X_MAX_MULTICAST*(1 + port);
11592
8d9c5f34 11593 config->hdr.length = i;
34f80b04 11594 config->hdr.offset = offset;
8d9c5f34 11595 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11596 config->hdr.reserved1 = 0;
11597
e665bfda
MC
11598 bp->set_mac_pending++;
11599 smp_wmb();
11600
34f80b04
EG
11601 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11602 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11603 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11604 0);
11605 } else { /* E1H */
11606 /* Accept one or more multicasts */
11607 struct dev_mc_list *mclist;
11608 u32 mc_filter[MC_HASH_SIZE];
11609 u32 crc, bit, regidx;
11610 int i;
11611
11612 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11613
11614 for (i = 0, mclist = dev->mc_list;
11615 mclist && (i < dev->mc_count);
11616 i++, mclist = mclist->next) {
11617
7c510e4b
JB
11618 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11619 mclist->dmi_addr);
34f80b04
EG
11620
11621 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11622 bit = (crc >> 24) & 0xff;
11623 regidx = bit >> 5;
11624 bit &= 0x1f;
11625 mc_filter[regidx] |= (1 << bit);
11626 }
11627
11628 for (i = 0; i < MC_HASH_SIZE; i++)
11629 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11630 mc_filter[i]);
11631 }
11632 }
11633
11634 bp->rx_mode = rx_mode;
11635 bnx2x_set_storm_rx_mode(bp);
11636}
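
On E1H the multicast filter is a 256-bit hash table: bits 31:24 of a CRC32C over the 6-byte MAC select the bucket, whose top 3 bits pick one of the eight 32-bit MC_HASH registers and whose low 5 bits pick the bit within it. A sketch of the bucket math; the bitwise crc32c() below is a stand-in whose seed/finalization may differ from the kernel's crc32c_le(0, ...), but the index arithmetic is the point:

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SIZE 8		/* 8 x 32 bits = 256 buckets */

/* bitwise CRC32C (Castagnoli, poly 0x82f63b78), stand-in helper */
static uint32_t crc32c(const uint8_t *p, size_t n)
{
	uint32_t crc = 0xffffffffu;

	while (n--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82f63b78u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	uint32_t crc = crc32c(mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;	/* bucket 0..255     */
	uint32_t regidx = bit >> 5;		/* which 32-bit word */
	bit &= 0x1f;				/* which bit in it   */
	mc_filter[regidx] |= 1u << bit;

	printf("reg %u bit %u -> 0x%08x\n", regidx, bit, mc_filter[regidx]);
	return 0;
}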
11637
11638/* called with rtnl_lock */
a2fbb9ea
ET
11639static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11640{
11641 struct sockaddr *addr = p;
11642 struct bnx2x *bp = netdev_priv(dev);
11643
34f80b04 11644 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11645 return -EINVAL;
11646
11647 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11648 if (netif_running(dev)) {
11649 if (CHIP_IS_E1(bp))
e665bfda 11650 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11651 else
e665bfda 11652 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11653 }
a2fbb9ea
ET
11654
11655 return 0;
11656}
11657
c18487ee 11658/* called with rtnl_lock */
01cd4528
EG
11659static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11660 int devad, u16 addr)
a2fbb9ea 11661{
01cd4528
EG
11662 struct bnx2x *bp = netdev_priv(netdev);
11663 u16 value;
11664 int rc;
11665 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11666
01cd4528
EG
11667 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11668 prtad, devad, addr);
a2fbb9ea 11669
01cd4528
EG
11670 if (prtad != bp->mdio.prtad) {
11671 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11672 prtad, bp->mdio.prtad);
11673 return -EINVAL;
11674 }
11675
11676 /* The HW expects different devad if CL22 is used */
11677 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11678
01cd4528
EG
11679 bnx2x_acquire_phy_lock(bp);
11680 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11681 devad, addr, &value);
11682 bnx2x_release_phy_lock(bp);
11683 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11684
01cd4528
EG
11685 if (!rc)
11686 rc = value;
11687 return rc;
11688}
a2fbb9ea 11689
01cd4528
EG
11690/* called with rtnl_lock */
11691static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11692 u16 addr, u16 value)
11693{
11694 struct bnx2x *bp = netdev_priv(netdev);
11695 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11696 int rc;
11697
11698 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11699 " value 0x%x\n", prtad, devad, addr, value);
11700
11701 if (prtad != bp->mdio.prtad) {
11702		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11703 prtad, bp->mdio.prtad);
11704 return -EINVAL;
a2fbb9ea
ET
11705 }
11706
01cd4528
EG
11707 /* The HW expects different devad if CL22 is used */
11708 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11709
01cd4528
EG
11710 bnx2x_acquire_phy_lock(bp);
11711 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11712 devad, addr, value);
11713 bnx2x_release_phy_lock(bp);
11714 return rc;
11715}
c18487ee 11716
01cd4528
EG
11717/* called with rtnl_lock */
11718static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11719{
11720 struct bnx2x *bp = netdev_priv(dev);
11721 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11722
01cd4528
EG
11723 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11724 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11725
01cd4528
EG
11726 if (!netif_running(dev))
11727 return -EAGAIN;
11728
11729 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11730}
11731
34f80b04 11732/* called with rtnl_lock */
a2fbb9ea
ET
11733static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11734{
11735 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11736 int rc = 0;
a2fbb9ea
ET
11737
11738 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11739 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11740 return -EINVAL;
11741
11742 /* This does not race with packet allocation
c14423fe 11743 * because the actual alloc size is
a2fbb9ea
ET
11744 * only updated as part of load
11745 */
11746 dev->mtu = new_mtu;
11747
11748 if (netif_running(dev)) {
34f80b04
EG
11749 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11750 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11751 }
34f80b04
EG
11752
11753 return rc;
a2fbb9ea
ET
11754}
11755
11756static void bnx2x_tx_timeout(struct net_device *dev)
11757{
11758 struct bnx2x *bp = netdev_priv(dev);
11759
11760#ifdef BNX2X_STOP_ON_ERROR
11761 if (!bp->panic)
11762 bnx2x_panic();
11763#endif
11764 /* This allows the netif to be shutdown gracefully before resetting */
11765 schedule_work(&bp->reset_task);
11766}
11767
11768#ifdef BCM_VLAN
34f80b04 11769/* called with rtnl_lock */
a2fbb9ea
ET
11770static void bnx2x_vlan_rx_register(struct net_device *dev,
11771 struct vlan_group *vlgrp)
11772{
11773 struct bnx2x *bp = netdev_priv(dev);
11774
11775 bp->vlgrp = vlgrp;
0c6671b0
EG
11776
11777 /* Set flags according to the required capabilities */
11778 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11779
11780 if (dev->features & NETIF_F_HW_VLAN_TX)
11781 bp->flags |= HW_VLAN_TX_FLAG;
11782
11783 if (dev->features & NETIF_F_HW_VLAN_RX)
11784 bp->flags |= HW_VLAN_RX_FLAG;
11785
a2fbb9ea 11786 if (netif_running(dev))
49d66772 11787 bnx2x_set_client_config(bp);
a2fbb9ea 11788}
34f80b04 11789
a2fbb9ea
ET
11790#endif
11791
11792#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11793static void poll_bnx2x(struct net_device *dev)
11794{
11795 struct bnx2x *bp = netdev_priv(dev);
11796
11797 disable_irq(bp->pdev->irq);
11798 bnx2x_interrupt(bp->pdev->irq, dev);
11799 enable_irq(bp->pdev->irq);
11800}
11801#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
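
/*
 * For example, an x8 link trained at Gen2 rates comes back as *width == 8
 * and *speed == 2, which bnx2x_init_one() below prints as
 * "PCI-E x8 5GHz (Gen2)".
 */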

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
			       "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
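
/*
 * The bounds check above relies on bnx2x_fw_file_hdr being laid out as a
 * plain array of {offset, len} section descriptors, which is why the header
 * pointer can be re-cast to struct bnx2x_fw_file_section and walked for
 * sizeof(*fw_hdr) / sizeof(*sections) iterations.
 */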

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
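
/*
 * Example: the big-endian bytes { 0xAA, 0xBB, 0xCC, 0xDD } in the firmware
 * blob become the host-order u32 0xAABBCCDD, whatever the CPU endianness.
 */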

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
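
/*
 * Illustrative decode of one 8-byte entry: the big-endian words
 * 0x01AABBCC 0x00000010 unpack to { .op = 0x01, .offset = 0xAABBCC,
 * .raw_data = 0x00000010 }.
 */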

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
	do {								\
		u32 len = be32_to_cpu(fw_hdr->arr.len);			\
		bp->arr = kmalloc(len, GFP_KERNEL);			\
		if (!bp->arr) {						\
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len);			\
			goto lbl;					\
		}							\
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len);				\
	} while (0)

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
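
	/* The result is FW_FILE_PREFIX_E1 (or _E1H for 57711/57711E parts)
	 * followed by "<maj>.<min>.<rev>.<eng>.fw"; the versioned name must
	 * match a blob shipped in the linux-firmware tree.
	 */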

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
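
/*
 * The unwind labels above run in reverse allocation order: a failure in the
 * offsets allocation frees init_ops, then init_data, before releasing the
 * firmware blob itself.
 */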

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
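
/*
 * Probe order matters here: the firmware init arrays are loaded before
 * register_netdev(), so by the time the interface is visible to user space
 * everything a subsequent ndo_open needs is already in place, and the
 * init_one_exit unwind mirrors bnx2x_init_dev()'s error path.
 */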

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
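
/*
 * Suspend and resume are symmetric: suspend saves PCI state and unloads the
 * NIC with UNLOAD_CLOSE before dropping to the chosen low-power state, while
 * resume restores state, returns to D0 and reloads with LOAD_OPEN; both take
 * rtnl_lock so they cannot race ndo_open/ndo_stop.
 */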

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
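
/*
 * EEH recovery flow as driven by the PCI core: error_detected() detaches the
 * interface and asks for a slot reset, slot_reset() re-enables the device
 * after the bus reset, and resume() re-reads the MCP shared memory via
 * bnx2x_eeh_recover() before reloading the NIC.
 */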

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
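
/*
 * bp->cnic_kwq is managed as a simple ring: cnic_kwq_last marks the final
 * slot, and both producer and consumer wrap back to bp->cnic_kwq once they
 * reach it, while cnic_spq_pending throttles how many CNIC entries may sit
 * on the shared slow-path queue at once.
 */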

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
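
/*
 * Two variants of the same call: bnx2x_cnic_ctl_send() may sleep and takes
 * cnic_mutex, while the _bh flavour is safe from softirq context and relies
 * on rcu_read_lock() plus the rcu_assign_pointer()/synchronize_rcu() pairing
 * in bnx2x_register_cnic()/bnx2x_unregister_cnic() below.
 */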

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
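
/*
 * These DRV_CTL commands are issued by the cnic module: context-table
 * writes and completion counts flow through bnx2x_ilt_wr() and
 * bnx2x_cnic_sp_post(), while the L2 start/stop commands toggle the
 * client's bit in rx_mode_cl_mask before re-programming the Rx mode.
 */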

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
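
/*
 * In MSI-X mode the CNIC status block gets its own vector
 * (bp->msix_table[1]); otherwise the MSI-X flags are cleared and CNIC ends
 * up sharing the single device interrupt, with only the status block
 * pointers handed over.
 */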

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
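
/*
 * The synchronize_rcu() above guarantees that no bnx2x_cnic_ctl_send_bh()
 * reader can still hold the old bp->cnic_ops pointer by the time the kwq
 * ring is freed.
 */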

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */