]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2x_main.c
ambassador: declare MODULE_FIRMWARE
[net-next-2.6.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
359d8b15 53
a2fbb9ea
ET
54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
0a64ea57 57#include "bnx2x_dump.h"
a2fbb9ea 58
89794a6f
YR
59#define DRV_MODULE_VERSION "1.52.1-3"
60#define DRV_MODULE_RELDATE "2009/11/05"
34f80b04 61#define BNX2X_BC_VER 0x040200
a2fbb9ea 62
94a78b79
VZ
63#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
ab6ad5a4
EG
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
94a78b79 68
34f80b04
EG
69/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
a2fbb9ea 71
53a10565 72static char version[] __devinitdata =
34f80b04 73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
24e3fcef 76MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 80
555f6c78
EG
81static int multi_mode = 1;
82module_param(multi_mode, int, 0);
ca00392c
EG
83MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
555f6c78 95
19680c48 96static int disable_tpa;
19680c48 97module_param(disable_tpa, int, 0);
9898f86d 98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
99
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
a18f5128
EG
104static int dropless_fc;
105module_param(dropless_fc, int, 0);
106MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
107
9898f86d 108static int poll;
a2fbb9ea 109module_param(poll, int, 0);
9898f86d 110MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
111
112static int mrrs = -1;
113module_param(mrrs, int, 0);
114MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
115
9898f86d 116static int debug;
a2fbb9ea 117module_param(debug, int, 0);
9898f86d
EG
118MODULE_PARM_DESC(debug, " Default debug msglevel");
119
120static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 121
1cf167f2 122static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
123
124enum bnx2x_board_type {
125 BCM57710 = 0,
34f80b04
EG
126 BCM57711 = 1,
127 BCM57711E = 2,
a2fbb9ea
ET
128};
129
34f80b04 130/* indexed by board_type, above */
53a10565 131static struct {
a2fbb9ea
ET
132 char *name;
133} board_info[] __devinitdata = {
34f80b04
EG
134 { "Broadcom NetXtreme II BCM57710 XGb" },
135 { "Broadcom NetXtreme II BCM57711 XGb" },
136 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
137};
138
34f80b04 139
a2fbb9ea 140static const struct pci_device_id bnx2x_pci_tbl[] = {
e4ed7113
EG
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
a2fbb9ea
ET
144 { 0 }
145};
146
147MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148
149/****************************************************************************
150* General service functions
151****************************************************************************/
152
153/* used only at init
154 * locking is done by mcp
155 */
573f2035 156void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
157{
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161 PCICFG_VENDOR_ID_OFFSET);
162}
163
a2fbb9ea
ET
164static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
165{
166 u32 val;
167
168 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171 PCICFG_VENDOR_ID_OFFSET);
172
173 return val;
174}
a2fbb9ea
ET
175
176static const u32 dmae_reg_go_c[] = {
177 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
181};
182
183/* copy command into DMAE command memory and set DMAE command go */
184static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185 int idx)
186{
187 u32 cmd_offset;
188 int i;
189
190 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193
ad8d3948
EG
194 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
196 }
197 REG_WR(bp, dmae_reg_go_c[idx], 1);
198}
199
ad8d3948
EG
200void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201 u32 len32)
a2fbb9ea 202{
5ff7b6d4 203 struct dmae_command dmae;
a2fbb9ea 204 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
205 int cnt = 200;
206
207 if (!bp->dmae_ready) {
208 u32 *data = bnx2x_sp(bp, wb_data[0]);
209
210 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
211 " using indirect\n", dst_addr, len32);
212 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
213 return;
214 }
215
5ff7b6d4 216 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 217
5ff7b6d4
EG
218 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 221#ifdef __BIG_ENDIAN
5ff7b6d4 222 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 223#else
5ff7b6d4 224 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 225#endif
5ff7b6d4
EG
226 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228 dmae.src_addr_lo = U64_LO(dma_addr);
229 dmae.src_addr_hi = U64_HI(dma_addr);
230 dmae.dst_addr_lo = dst_addr >> 2;
231 dmae.dst_addr_hi = 0;
232 dmae.len = len32;
233 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 236
c3eefaf6 237 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
238 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
239 "dst_addr [%x:%08x (%08x)]\n"
240 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
241 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
ad8d3948 244 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
245 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea 247
5ff7b6d4
EG
248 mutex_lock(&bp->dmae_mutex);
249
a2fbb9ea
ET
250 *wb_comp = 0;
251
5ff7b6d4 252 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
253
254 udelay(5);
ad8d3948
EG
255
256 while (*wb_comp != DMAE_COMP_VAL) {
257 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258
ad8d3948 259 if (!cnt) {
c3eefaf6 260 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
261 break;
262 }
ad8d3948 263 cnt--;
12469401
YG
264 /* adjust delay for emulation/FPGA */
265 if (CHIP_REV_IS_SLOW(bp))
266 msleep(100);
267 else
268 udelay(5);
a2fbb9ea 269 }
ad8d3948
EG
270
271 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
272}
273
c18487ee 274void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 275{
5ff7b6d4 276 struct dmae_command dmae;
a2fbb9ea 277 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
278 int cnt = 200;
279
280 if (!bp->dmae_ready) {
281 u32 *data = bnx2x_sp(bp, wb_data[0]);
282 int i;
283
284 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
285 " using indirect\n", src_addr, len32);
286 for (i = 0; i < len32; i++)
287 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
288 return;
289 }
290
5ff7b6d4 291 memset(&dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 292
5ff7b6d4
EG
293 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 296#ifdef __BIG_ENDIAN
5ff7b6d4 297 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 298#else
5ff7b6d4 299 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 300#endif
5ff7b6d4
EG
301 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303 dmae.src_addr_lo = src_addr >> 2;
304 dmae.src_addr_hi = 0;
305 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
307 dmae.len = len32;
308 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310 dmae.comp_val = DMAE_COMP_VAL;
a2fbb9ea 311
c3eefaf6 312 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
313 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
314 "dst_addr [%x:%08x (%08x)]\n"
315 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
5ff7b6d4
EG
316 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
a2fbb9ea 319
5ff7b6d4
EG
320 mutex_lock(&bp->dmae_mutex);
321
322 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
a2fbb9ea
ET
323 *wb_comp = 0;
324
5ff7b6d4 325 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
326
327 udelay(5);
ad8d3948
EG
328
329 while (*wb_comp != DMAE_COMP_VAL) {
330
ad8d3948 331 if (!cnt) {
c3eefaf6 332 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
333 break;
334 }
ad8d3948 335 cnt--;
12469401
YG
336 /* adjust delay for emulation/FPGA */
337 if (CHIP_REV_IS_SLOW(bp))
338 msleep(100);
339 else
340 udelay(5);
a2fbb9ea 341 }
ad8d3948 342 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
343 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
345
346 mutex_unlock(&bp->dmae_mutex);
347}
348
573f2035
EG
349void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350 u32 addr, u32 len)
351{
352 int offset = 0;
353
354 while (len > DMAE_LEN32_WR_MAX) {
355 bnx2x_write_dmae(bp, phys_addr + offset,
356 addr + offset, DMAE_LEN32_WR_MAX);
357 offset += DMAE_LEN32_WR_MAX * 4;
358 len -= DMAE_LEN32_WR_MAX;
359 }
360
361 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362}
363
ad8d3948
EG
364/* used only for slowpath so not inlined */
365static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
366{
367 u32 wb_write[2];
368
369 wb_write[0] = val_hi;
370 wb_write[1] = val_lo;
371 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 372}
a2fbb9ea 373
ad8d3948
EG
374#ifdef USE_WB_RD
375static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
376{
377 u32 wb_data[2];
378
379 REG_RD_DMAE(bp, reg, wb_data, 2);
380
381 return HILO_U64(wb_data[0], wb_data[1]);
382}
383#endif
384
a2fbb9ea
ET
385static int bnx2x_mc_assert(struct bnx2x *bp)
386{
a2fbb9ea 387 char last_idx;
34f80b04
EG
388 int i, rc = 0;
389 u32 row0, row1, row2, row3;
390
391 /* XSTORM */
392 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393 XSTORM_ASSERT_LIST_INDEX_OFFSET);
394 if (last_idx)
395 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396
397 /* print the asserts */
398 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399
400 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401 XSTORM_ASSERT_LIST_OFFSET(i));
402 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408
409 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411 " 0x%08x 0x%08x 0x%08x\n",
412 i, row3, row2, row1, row0);
413 rc++;
414 } else {
415 break;
416 }
417 }
418
419 /* TSTORM */
420 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421 TSTORM_ASSERT_LIST_INDEX_OFFSET);
422 if (last_idx)
423 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424
425 /* print the asserts */
426 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427
428 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429 TSTORM_ASSERT_LIST_OFFSET(i));
430 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436
437 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439 " 0x%08x 0x%08x 0x%08x\n",
440 i, row3, row2, row1, row0);
441 rc++;
442 } else {
443 break;
444 }
445 }
446
447 /* CSTORM */
448 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449 CSTORM_ASSERT_LIST_INDEX_OFFSET);
450 if (last_idx)
451 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452
453 /* print the asserts */
454 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455
456 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457 CSTORM_ASSERT_LIST_OFFSET(i));
458 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464
465 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467 " 0x%08x 0x%08x 0x%08x\n",
468 i, row3, row2, row1, row0);
469 rc++;
470 } else {
471 break;
472 }
473 }
474
475 /* USTORM */
476 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477 USTORM_ASSERT_LIST_INDEX_OFFSET);
478 if (last_idx)
479 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480
481 /* print the asserts */
482 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483
484 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485 USTORM_ASSERT_LIST_OFFSET(i));
486 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487 USTORM_ASSERT_LIST_OFFSET(i) + 4);
488 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489 USTORM_ASSERT_LIST_OFFSET(i) + 8);
490 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491 USTORM_ASSERT_LIST_OFFSET(i) + 12);
492
493 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495 " 0x%08x 0x%08x 0x%08x\n",
496 i, row3, row2, row1, row0);
497 rc++;
498 } else {
499 break;
a2fbb9ea
ET
500 }
501 }
34f80b04 502
a2fbb9ea
ET
503 return rc;
504}
c14423fe 505
a2fbb9ea
ET
506static void bnx2x_fw_dump(struct bnx2x *bp)
507{
508 u32 mark, offset;
4781bfad 509 __be32 data[9];
a2fbb9ea
ET
510 int word;
511
512 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772 513 mark = ((mark + 0x3) & ~0x3);
ad361c98 514 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 515
ad361c98 516 printk(KERN_ERR PFX);
a2fbb9ea
ET
517 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518 for (word = 0; word < 8; word++)
519 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520 offset + 4*word));
521 data[8] = 0x0;
49d66772 522 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
523 }
524 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525 for (word = 0; word < 8; word++)
526 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527 offset + 4*word));
528 data[8] = 0x0;
49d66772 529 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea 530 }
ad361c98 531 printk(KERN_ERR PFX "end of fw dump\n");
a2fbb9ea
ET
532}
533
534static void bnx2x_panic_dump(struct bnx2x *bp)
535{
536 int i;
537 u16 j, start, end;
538
66e855f3
YG
539 bp->stats_state = STATS_STATE_DISABLED;
540 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
541
a2fbb9ea
ET
542 BNX2X_ERR("begin crash dump -----------------\n");
543
8440d2b6
EG
544 /* Indices */
545 /* Common */
546 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
547 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
548 " spq_prod_idx(%u)\n",
549 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
551
552 /* Rx */
553 for_each_rx_queue(bp, i) {
a2fbb9ea 554 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 555
c3eefaf6 556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
66e855f3
YG
557 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
558 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
8440d2b6 559 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
560 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
c3eefaf6 562 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
8440d2b6
EG
563 " fp_u_idx(%x) *sb_u_idx(%x)\n",
564 fp->rx_sge_prod, fp->last_max_sge,
565 le16_to_cpu(fp->fp_u_idx),
566 fp->status_blk->u_status_block.status_block_index);
567 }
a2fbb9ea 568
8440d2b6
EG
569 /* Tx */
570 for_each_tx_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 572
c3eefaf6 573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
8440d2b6
EG
574 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
575 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
c3eefaf6 577 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
ca00392c 578 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 579 fp->status_blk->c_status_block.status_block_index,
ca00392c 580 fp->tx_db.data.prod);
8440d2b6 581 }
a2fbb9ea 582
8440d2b6
EG
583 /* Rings */
584 /* Rx */
585 for_each_rx_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
587
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 590 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
591 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
593
c3eefaf6
EG
594 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
595 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
596 }
597
3196a88a
EG
598 start = RX_SGE(fp->rx_sge_prod);
599 end = RX_SGE(fp->last_max_sge);
8440d2b6 600 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
601 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
603
c3eefaf6
EG
604 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
605 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
606 }
607
a2fbb9ea
ET
608 start = RCQ_BD(fp->rx_comp_cons - 10);
609 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 610 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
611 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
612
c3eefaf6
EG
613 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
615 }
616 }
617
8440d2b6
EG
618 /* Tx */
619 for_each_tx_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i];
621
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624 for (j = start; j != end; j = TX_BD(j + 1)) {
625 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
626
c3eefaf6
EG
627 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
629 }
630
631 start = TX_BD(fp->tx_bd_cons - 10);
632 end = TX_BD(fp->tx_bd_cons + 254);
633 for (j = start; j != end; j = TX_BD(j + 1)) {
634 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
635
c3eefaf6
EG
636 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
638 }
639 }
a2fbb9ea 640
34f80b04 641 bnx2x_fw_dump(bp);
a2fbb9ea
ET
642 bnx2x_mc_assert(bp);
643 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
644}
645
615f8fd9 646static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 647{
34f80b04 648 int port = BP_PORT(bp);
a2fbb9ea
ET
649 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650 u32 val = REG_RD(bp, addr);
651 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 652 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
653
654 if (msix) {
8badd27a
EG
655 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
657 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
659 } else if (msi) {
660 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
664 } else {
665 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 666 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
667 HC_CONFIG_0_REG_INT_LINE_EN_0 |
668 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 669
8badd27a
EG
670 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
671 val, port, addr);
615f8fd9
ET
672
673 REG_WR(bp, addr, val);
674
a2fbb9ea
ET
675 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
676 }
677
8badd27a
EG
678 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
679 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
680
681 REG_WR(bp, addr, val);
37dbbf32
EG
682 /*
683 * Ensure that HC_CONFIG is written before leading/trailing edge config
684 */
685 mmiowb();
686 barrier();
34f80b04
EG
687
688 if (CHIP_IS_E1H(bp)) {
689 /* init leading/trailing edge */
690 if (IS_E1HMF(bp)) {
8badd27a 691 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 692 if (bp->port.pmf)
4acac6a5
EG
693 /* enable nig and gpio3 attention */
694 val |= 0x1100;
34f80b04
EG
695 } else
696 val = 0xffff;
697
698 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
700 }
37dbbf32
EG
701
702 /* Make sure that interrupts are indeed enabled from here on */
703 mmiowb();
a2fbb9ea
ET
704}
705
615f8fd9 706static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 707{
34f80b04 708 int port = BP_PORT(bp);
a2fbb9ea
ET
709 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710 u32 val = REG_RD(bp, addr);
711
712 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714 HC_CONFIG_0_REG_INT_LINE_EN_0 |
715 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
716
717 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
718 val, port, addr);
719
8badd27a
EG
720 /* flush all outstanding writes */
721 mmiowb();
722
a2fbb9ea
ET
723 REG_WR(bp, addr, val);
724 if (REG_RD(bp, addr) != val)
725 BNX2X_ERR("BUG! proper val not read from IGU!\n");
726}
727
f8ef6e44 728static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 729{
a2fbb9ea 730 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 731 int i, offset;
a2fbb9ea 732
34f80b04 733 /* disable interrupt handling */
a2fbb9ea 734 atomic_inc(&bp->intr_sem);
e1510706
EG
735 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
736
f8ef6e44
YG
737 if (disable_hw)
738 /* prevent the HW from sending interrupts */
739 bnx2x_int_disable(bp);
a2fbb9ea
ET
740
741 /* make sure all ISRs are done */
742 if (msix) {
8badd27a
EG
743 synchronize_irq(bp->msix_table[0].vector);
744 offset = 1;
37b091ba
MC
745#ifdef BCM_CNIC
746 offset++;
747#endif
a2fbb9ea 748 for_each_queue(bp, i)
8badd27a 749 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
750 } else
751 synchronize_irq(bp->pdev->irq);
752
753 /* make sure sp_task is not running */
1cf167f2
EG
754 cancel_delayed_work(&bp->sp_task);
755 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
756}
757
34f80b04 758/* fast path */
a2fbb9ea
ET
759
760/*
34f80b04 761 * General service functions
a2fbb9ea
ET
762 */
763
34f80b04 764static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
765 u8 storm, u16 index, u8 op, u8 update)
766{
5c862848
EG
767 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
768 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
769 struct igu_ack_register igu_ack;
770
771 igu_ack.status_block_index = index;
772 igu_ack.sb_id_and_flags =
34f80b04 773 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
774 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
775 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
776 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
777
5c862848
EG
778 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
779 (*(u32 *)&igu_ack), hc_addr);
780 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
781
782 /* Make sure that ACK is written */
783 mmiowb();
784 barrier();
a2fbb9ea
ET
785}
786
787static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
788{
789 struct host_status_block *fpsb = fp->status_blk;
790 u16 rc = 0;
791
792 barrier(); /* status block is written to by the chip */
793 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
794 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
795 rc |= 1;
796 }
797 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
798 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
799 rc |= 2;
800 }
801 return rc;
802}
803
a2fbb9ea
ET
804static u16 bnx2x_ack_int(struct bnx2x *bp)
805{
5c862848
EG
806 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
807 COMMAND_REG_SIMD_MASK);
808 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 809
5c862848
EG
810 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
811 result, hc_addr);
a2fbb9ea 812
a2fbb9ea
ET
813 return result;
814}
815
816
817/*
818 * fast path service functions
819 */
820
e8b5fc51
VZ
821static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
822{
823 /* Tell compiler that consumer and producer can change */
824 barrier();
825 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
826}
827
a2fbb9ea
ET
828/* free skb in the packet ring at pos idx
829 * return idx of last bd freed
830 */
831static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
832 u16 idx)
833{
834 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
835 struct eth_tx_start_bd *tx_start_bd;
836 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 837 struct sk_buff *skb = tx_buf->skb;
34f80b04 838 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
839 int nbd;
840
841 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
842 idx, tx_buf, skb);
843
844 /* unmap first bd */
845 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
846 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
847 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
848 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 849
ca00392c 850 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 851#ifdef BNX2X_STOP_ON_ERROR
ca00392c 852 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 853 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
854 bnx2x_panic();
855 }
856#endif
ca00392c 857 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 858
ca00392c
EG
859 /* Get the next bd */
860 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 861
ca00392c
EG
862 /* Skip a parse bd... */
863 --nbd;
864 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
865
866 /* ...and the TSO split header bd since they have no mapping */
867 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
868 --nbd;
869 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
870 }
871
872 /* now free frags */
873 while (nbd > 0) {
874
875 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
876 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
877 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
878 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
879 if (--nbd)
880 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
881 }
882
883 /* release skb */
53e5e96e 884 WARN_ON(!skb);
ca00392c 885 dev_kfree_skb_any(skb);
a2fbb9ea
ET
886 tx_buf->first_bd = 0;
887 tx_buf->skb = NULL;
888
34f80b04 889 return new_cons;
a2fbb9ea
ET
890}
891
34f80b04 892static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 893{
34f80b04
EG
894 s16 used;
895 u16 prod;
896 u16 cons;
a2fbb9ea 897
34f80b04 898 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
899 prod = fp->tx_bd_prod;
900 cons = fp->tx_bd_cons;
901
34f80b04
EG
902 /* NUM_TX_RINGS = number of "next-page" entries
903 It will be used as a threshold */
904 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 905
34f80b04 906#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
907 WARN_ON(used < 0);
908 WARN_ON(used > fp->bp->tx_ring_size);
909 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 910#endif
a2fbb9ea 911
34f80b04 912 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
913}
914
7961f791 915static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
916{
917 struct bnx2x *bp = fp->bp;
555f6c78 918 struct netdev_queue *txq;
a2fbb9ea
ET
919 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
920 int done = 0;
921
922#ifdef BNX2X_STOP_ON_ERROR
923 if (unlikely(bp->panic))
924 return;
925#endif
926
ca00392c 927 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
a2fbb9ea
ET
928 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
929 sw_cons = fp->tx_pkt_cons;
930
931 while (sw_cons != hw_cons) {
932 u16 pkt_cons;
933
934 pkt_cons = TX_BD(sw_cons);
935
936 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
937
34f80b04 938 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
939 hw_cons, sw_cons, pkt_cons);
940
34f80b04 941/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
942 rmb();
943 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
944 }
945*/
946 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
947 sw_cons++;
948 done++;
a2fbb9ea
ET
949 }
950
951 fp->tx_pkt_cons = sw_cons;
952 fp->tx_bd_cons = bd_cons;
953
a2fbb9ea 954 /* TBD need a thresh? */
555f6c78 955 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 956
6044735d
EG
957 /* Need to make the tx_bd_cons update visible to start_xmit()
958 * before checking for netif_tx_queue_stopped(). Without the
959 * memory barrier, there is a small possibility that
960 * start_xmit() will miss it and cause the queue to be stopped
961 * forever.
962 */
963 smp_mb();
964
555f6c78 965 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 966 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 967 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 968 netif_tx_wake_queue(txq);
a2fbb9ea
ET
969 }
970}
971
993ac7b5
MC
972#ifdef BCM_CNIC
973static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
974#endif
3196a88a 975
a2fbb9ea
ET
976static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
977 union eth_rx_cqe *rr_cqe)
978{
979 struct bnx2x *bp = fp->bp;
980 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
981 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
982
34f80b04 983 DP(BNX2X_MSG_SP,
a2fbb9ea 984 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 985 fp->index, cid, command, bp->state,
34f80b04 986 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
987
988 bp->spq_left++;
989
0626b899 990 if (fp->index) {
a2fbb9ea
ET
991 switch (command | fp->state) {
992 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
993 BNX2X_FP_STATE_OPENING):
994 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
995 cid);
996 fp->state = BNX2X_FP_STATE_OPEN;
997 break;
998
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1000 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1001 cid);
1002 fp->state = BNX2X_FP_STATE_HALTED;
1003 break;
1004
1005 default:
34f80b04
EG
1006 BNX2X_ERR("unexpected MC reply (%d) "
1007 "fp->state is %x\n", command, fp->state);
1008 break;
a2fbb9ea 1009 }
34f80b04 1010 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1011 return;
1012 }
c14423fe 1013
a2fbb9ea
ET
1014 switch (command | bp->state) {
1015 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1016 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1017 bp->state = BNX2X_STATE_OPEN;
1018 break;
1019
1020 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1022 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1023 fp->state = BNX2X_FP_STATE_HALTED;
1024 break;
1025
a2fbb9ea 1026 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1027 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1028 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1029 break;
1030
993ac7b5
MC
1031#ifdef BCM_CNIC
1032 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1033 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1034 bnx2x_cnic_cfc_comp(bp, cid);
1035 break;
1036#endif
3196a88a 1037
a2fbb9ea 1038 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1039 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1040 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
e665bfda
MC
1041 bp->set_mac_pending--;
1042 smp_wmb();
a2fbb9ea
ET
1043 break;
1044
49d66772 1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1046 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
e665bfda
MC
1047 bp->set_mac_pending--;
1048 smp_wmb();
49d66772
ET
1049 break;
1050
a2fbb9ea 1051 default:
34f80b04 1052 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1053 command, bp->state);
34f80b04 1054 break;
a2fbb9ea 1055 }
34f80b04 1056 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1057}
1058
7a9b2557
VZ
1059static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1061{
1062 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1063 struct page *page = sw_buf->page;
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065
1066 /* Skip "next page" elements */
1067 if (!page)
1068 return;
1069
1070 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1071 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1072 __free_pages(page, PAGES_PER_SGE_SHIFT);
1073
1074 sw_buf->page = NULL;
1075 sge->addr_hi = 0;
1076 sge->addr_lo = 0;
1077}
1078
1079static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1080 struct bnx2x_fastpath *fp, int last)
1081{
1082 int i;
1083
1084 for (i = 0; i < last; i++)
1085 bnx2x_free_rx_sge(bp, fp, i);
1086}
1087
1088static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1089 struct bnx2x_fastpath *fp, u16 index)
1090{
1091 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1092 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1093 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1094 dma_addr_t mapping;
1095
1096 if (unlikely(page == NULL))
1097 return -ENOMEM;
1098
4f40f2cb 1099 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1100 PCI_DMA_FROMDEVICE);
8d8bb39b 1101 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1102 __free_pages(page, PAGES_PER_SGE_SHIFT);
1103 return -ENOMEM;
1104 }
1105
1106 sw_buf->page = page;
1107 pci_unmap_addr_set(sw_buf, mapping, mapping);
1108
1109 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1110 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1111
1112 return 0;
1113}
1114
a2fbb9ea
ET
1115static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1116 struct bnx2x_fastpath *fp, u16 index)
1117{
1118 struct sk_buff *skb;
1119 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1120 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1121 dma_addr_t mapping;
1122
1123 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1124 if (unlikely(skb == NULL))
1125 return -ENOMEM;
1126
437cf2f1 1127 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1128 PCI_DMA_FROMDEVICE);
8d8bb39b 1129 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1130 dev_kfree_skb(skb);
1131 return -ENOMEM;
1132 }
1133
1134 rx_buf->skb = skb;
1135 pci_unmap_addr_set(rx_buf, mapping, mapping);
1136
1137 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1138 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1139
1140 return 0;
1141}
1142
1143/* note that we are not allocating a new skb,
1144 * we are just moving one from cons to prod
1145 * we are not creating a new mapping,
1146 * so there is no need to check for dma_mapping_error().
1147 */
1148static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1149 struct sk_buff *skb, u16 cons, u16 prod)
1150{
1151 struct bnx2x *bp = fp->bp;
1152 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1153 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1154 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1155 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1156
1157 pci_dma_sync_single_for_device(bp->pdev,
1158 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1159 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1160
1161 prod_rx_buf->skb = cons_rx_buf->skb;
1162 pci_unmap_addr_set(prod_rx_buf, mapping,
1163 pci_unmap_addr(cons_rx_buf, mapping));
1164 *prod_bd = *cons_bd;
1165}
1166
7a9b2557
VZ
1167static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1168 u16 idx)
1169{
1170 u16 last_max = fp->last_max_sge;
1171
1172 if (SUB_S16(idx, last_max) > 0)
1173 fp->last_max_sge = idx;
1174}
1175
1176static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1177{
1178 int i, j;
1179
1180 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1181 int idx = RX_SGE_CNT * i - 1;
1182
1183 for (j = 0; j < 2; j++) {
1184 SGE_MASK_CLEAR_BIT(fp, idx);
1185 idx--;
1186 }
1187 }
1188}
1189
1190static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1191 struct eth_fast_path_rx_cqe *fp_cqe)
1192{
1193 struct bnx2x *bp = fp->bp;
4f40f2cb 1194 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1195 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1196 SGE_PAGE_SHIFT;
7a9b2557
VZ
1197 u16 last_max, last_elem, first_elem;
1198 u16 delta = 0;
1199 u16 i;
1200
1201 if (!sge_len)
1202 return;
1203
1204 /* First mark all used pages */
1205 for (i = 0; i < sge_len; i++)
1206 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1207
1208 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1209 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1210
1211 /* Here we assume that the last SGE index is the biggest */
1212 prefetch((void *)(fp->sge_mask));
1213 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1214
1215 last_max = RX_SGE(fp->last_max_sge);
1216 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1217 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1218
1219 /* If ring is not full */
1220 if (last_elem + 1 != first_elem)
1221 last_elem++;
1222
1223 /* Now update the prod */
1224 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1225 if (likely(fp->sge_mask[i]))
1226 break;
1227
1228 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1229 delta += RX_SGE_MASK_ELEM_SZ;
1230 }
1231
1232 if (delta > 0) {
1233 fp->rx_sge_prod += delta;
1234 /* clear page-end entries */
1235 bnx2x_clear_sge_mask_next_elems(fp);
1236 }
1237
1238 DP(NETIF_MSG_RX_STATUS,
1239 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1240 fp->last_max_sge, fp->rx_sge_prod);
1241}
1242
1243static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1244{
1245 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1246 memset(fp->sge_mask, 0xff,
1247 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1248
33471629
EG
1249 /* Clear the two last indices in the page to 1:
1250 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1251 hence will never be indicated and should be removed from
1252 the calculations. */
1253 bnx2x_clear_sge_mask_next_elems(fp);
1254}
1255
1256static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1257 struct sk_buff *skb, u16 cons, u16 prod)
1258{
1259 struct bnx2x *bp = fp->bp;
1260 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1261 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1262 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1263 dma_addr_t mapping;
1264
1265 /* move empty skb from pool to prod and map it */
1266 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1267 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1268 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1269 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1270
1271 /* move partial skb from cons to pool (don't unmap yet) */
1272 fp->tpa_pool[queue] = *cons_rx_buf;
1273
1274 /* mark bin state as start - print error if current state != stop */
1275 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1276 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1277
1278 fp->tpa_state[queue] = BNX2X_TPA_START;
1279
1280 /* point prod_bd to new skb */
1281 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1282 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1283
1284#ifdef BNX2X_STOP_ON_ERROR
1285 fp->tpa_queue_used |= (1 << queue);
1286#ifdef __powerpc64__
1287 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1288#else
1289 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1290#endif
1291 fp->tpa_queue_used);
1292#endif
1293}
1294
1295static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1296 struct sk_buff *skb,
1297 struct eth_fast_path_rx_cqe *fp_cqe,
1298 u16 cqe_idx)
1299{
1300 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1301 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1302 u32 i, frag_len, frag_size, pages;
1303 int err;
1304 int j;
1305
1306 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1307 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1308
1309 /* This is needed in order to enable forwarding support */
1310 if (frag_size)
4f40f2cb 1311 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1312 max(frag_size, (u32)len_on_bd));
1313
1314#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1315 if (pages >
1316 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1317 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1318 pages, cqe_idx);
1319 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1320 fp_cqe->pkt_len, len_on_bd);
1321 bnx2x_panic();
1322 return -EINVAL;
1323 }
1324#endif
1325
1326 /* Run through the SGL and compose the fragmented skb */
1327 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1328 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1329
1330 /* FW gives the indices of the SGE as if the ring is an array
1331 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1332 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1333 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1334 old_rx_pg = *rx_pg;
1335
1336 /* If we fail to allocate a substitute page, we simply stop
1337 where we are and drop the whole packet */
1338 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1339 if (unlikely(err)) {
de832a55 1340 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1341 return err;
1342 }
1343
1344 /* Unmap the page as we r going to pass it to the stack */
1345 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1346 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1347
1348 /* Add one frag and update the appropriate fields in the skb */
1349 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1350
1351 skb->data_len += frag_len;
1352 skb->truesize += frag_len;
1353 skb->len += frag_len;
1354
1355 frag_size -= frag_len;
1356 }
1357
1358 return 0;
1359}
1360
1361static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1362 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1363 u16 cqe_idx)
1364{
1365 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1366 struct sk_buff *skb = rx_buf->skb;
1367 /* alloc new skb */
1368 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1369
1370 /* Unmap skb in the pool anyway, as we are going to change
1371 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1372 fails. */
1373 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1374 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1375
7a9b2557 1376 if (likely(new_skb)) {
66e855f3
YG
1377 /* fix ip xsum and give it to the stack */
1378 /* (no need to map the new skb) */
0c6671b0
EG
1379#ifdef BCM_VLAN
1380 int is_vlan_cqe =
1381 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1382 PARSING_FLAGS_VLAN);
1383 int is_not_hwaccel_vlan_cqe =
1384 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1385#endif
7a9b2557
VZ
1386
1387 prefetch(skb);
1388 prefetch(((char *)(skb)) + 128);
1389
7a9b2557
VZ
1390#ifdef BNX2X_STOP_ON_ERROR
1391 if (pad + len > bp->rx_buf_size) {
1392 BNX2X_ERR("skb_put is about to fail... "
1393 "pad %d len %d rx_buf_size %d\n",
1394 pad, len, bp->rx_buf_size);
1395 bnx2x_panic();
1396 return;
1397 }
1398#endif
1399
1400 skb_reserve(skb, pad);
1401 skb_put(skb, len);
1402
1403 skb->protocol = eth_type_trans(skb, bp->dev);
1404 skb->ip_summed = CHECKSUM_UNNECESSARY;
1405
1406 {
1407 struct iphdr *iph;
1408
1409 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1410#ifdef BCM_VLAN
1411 /* If there is no Rx VLAN offloading -
1412 take VLAN tag into an account */
1413 if (unlikely(is_not_hwaccel_vlan_cqe))
1414 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1415#endif
7a9b2557
VZ
1416 iph->check = 0;
1417 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1418 }
1419
1420 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1421 &cqe->fast_path_cqe, cqe_idx)) {
1422#ifdef BCM_VLAN
0c6671b0
EG
1423 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1424 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1425 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1426 le16_to_cpu(cqe->fast_path_cqe.
1427 vlan_tag));
1428 else
1429#endif
1430 netif_receive_skb(skb);
1431 } else {
1432 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1433 " - dropping packet!\n");
1434 dev_kfree_skb(skb);
1435 }
1436
7a9b2557
VZ
1437
1438 /* put new skb in bin */
1439 fp->tpa_pool[queue].skb = new_skb;
1440
1441 } else {
66e855f3 1442 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1443 DP(NETIF_MSG_RX_STATUS,
1444 "Failed to allocate new skb - dropping packet!\n");
de832a55 1445 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1446 }
1447
1448 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1449}
1450
1451static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1452 struct bnx2x_fastpath *fp,
1453 u16 bd_prod, u16 rx_comp_prod,
1454 u16 rx_sge_prod)
1455{
8d9c5f34 1456 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1457 int i;
1458
1459 /* Update producers */
1460 rx_prods.bd_prod = bd_prod;
1461 rx_prods.cqe_prod = rx_comp_prod;
1462 rx_prods.sge_prod = rx_sge_prod;
1463
58f4c4cf
EG
1464 /*
1465 * Make sure that the BD and SGE data is updated before updating the
1466 * producers since FW might read the BD/SGE right after the producer
1467 * is updated.
1468 * This is only applicable for weak-ordered memory model archs such
1469 * as IA-64. The following barrier is also mandatory since FW will
1470 * assumes BDs must have buffers.
1471 */
1472 wmb();
1473
8d9c5f34
EG
1474 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1475 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1476 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1477 ((u32 *)&rx_prods)[i]);
1478
58f4c4cf
EG
1479 mmiowb(); /* keep prod updates ordered */
1480
7a9b2557 1481 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1482 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1483 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1484}
1485
a2fbb9ea
ET
1486static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1487{
1488 struct bnx2x *bp = fp->bp;
34f80b04 1489 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1490 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1491 int rx_pkt = 0;
1492
1493#ifdef BNX2X_STOP_ON_ERROR
1494 if (unlikely(bp->panic))
1495 return 0;
1496#endif
1497
34f80b04
EG
1498 /* CQ "next element" is of the size of the regular element,
1499 that's why it's ok here */
a2fbb9ea
ET
1500 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1501 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1502 hw_comp_cons++;
1503
1504 bd_cons = fp->rx_bd_cons;
1505 bd_prod = fp->rx_bd_prod;
34f80b04 1506 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1507 sw_comp_cons = fp->rx_comp_cons;
1508 sw_comp_prod = fp->rx_comp_prod;
1509
1510 /* Memory barrier necessary as speculative reads of the rx
1511 * buffer can be ahead of the index in the status block
1512 */
1513 rmb();
1514
1515 DP(NETIF_MSG_RX_STATUS,
1516 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1517 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1518
1519 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1520 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1521 struct sk_buff *skb;
1522 union eth_rx_cqe *cqe;
34f80b04
EG
1523 u8 cqe_fp_flags;
1524 u16 len, pad;
a2fbb9ea
ET
1525
1526 comp_ring_cons = RCQ_BD(sw_comp_cons);
1527 bd_prod = RX_BD(bd_prod);
1528 bd_cons = RX_BD(bd_cons);
1529
619e7a66
EG
1530 /* Prefetch the page containing the BD descriptor
1531 at producer's index. It will be needed when new skb is
1532 allocated */
1533 prefetch((void *)(PAGE_ALIGN((unsigned long)
1534 (&fp->rx_desc_ring[bd_prod])) -
1535 PAGE_SIZE + 1));
1536
a2fbb9ea 1537 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1538 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1539
a2fbb9ea 1540 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1541 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1542 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1543 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1544 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1545 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1546
1547 /* is this a slowpath msg? */
34f80b04 1548 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1549 bnx2x_sp_event(fp, cqe);
1550 goto next_cqe;
1551
1552 /* this is an rx packet */
1553 } else {
1554 rx_buf = &fp->rx_buf_ring[bd_cons];
1555 skb = rx_buf->skb;
a2fbb9ea
ET
1556 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1557 pad = cqe->fast_path_cqe.placement_offset;
1558
7a9b2557
VZ
1559 /* If CQE is marked both TPA_START and TPA_END
1560 it is a non-TPA CQE */
1561 if ((!fp->disable_tpa) &&
1562 (TPA_TYPE(cqe_fp_flags) !=
1563 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1564 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1565
1566 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1567 DP(NETIF_MSG_RX_STATUS,
1568 "calling tpa_start on queue %d\n",
1569 queue);
1570
1571 bnx2x_tpa_start(fp, queue, skb,
1572 bd_cons, bd_prod);
1573 goto next_rx;
1574 }
1575
1576 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1577 DP(NETIF_MSG_RX_STATUS,
1578 "calling tpa_stop on queue %d\n",
1579 queue);
1580
1581 if (!BNX2X_RX_SUM_FIX(cqe))
1582 BNX2X_ERR("STOP on none TCP "
1583 "data\n");
1584
1585 /* This is a size of the linear data
1586 on this skb */
1587 len = le16_to_cpu(cqe->fast_path_cqe.
1588 len_on_bd);
1589 bnx2x_tpa_stop(bp, fp, queue, pad,
1590 len, cqe, comp_ring_cons);
1591#ifdef BNX2X_STOP_ON_ERROR
1592 if (bp->panic)
17cb4006 1593 return 0;
7a9b2557
VZ
1594#endif
1595
1596 bnx2x_update_sge_prod(fp,
1597 &cqe->fast_path_cqe);
1598 goto next_cqe;
1599 }
1600 }
1601
a2fbb9ea
ET
1602 pci_dma_sync_single_for_device(bp->pdev,
1603 pci_unmap_addr(rx_buf, mapping),
1604 pad + RX_COPY_THRESH,
1605 PCI_DMA_FROMDEVICE);
1606 prefetch(skb);
1607 prefetch(((char *)(skb)) + 128);
1608
1609 /* is this an error packet? */
34f80b04 1610 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1611 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1612 "ERROR flags %x rx packet %u\n",
1613 cqe_fp_flags, sw_comp_cons);
de832a55 1614 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1615 goto reuse_rx;
1616 }
1617
1618 /* Since we don't have a jumbo ring
1619 * copy small packets if mtu > 1500
1620 */
1621 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1622 (len <= RX_COPY_THRESH)) {
1623 struct sk_buff *new_skb;
1624
1625 new_skb = netdev_alloc_skb(bp->dev,
1626 len + pad);
1627 if (new_skb == NULL) {
1628 DP(NETIF_MSG_RX_ERR,
34f80b04 1629 "ERROR packet dropped "
a2fbb9ea 1630 "because of alloc failure\n");
de832a55 1631 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1632 goto reuse_rx;
1633 }
1634
1635 /* aligned copy */
1636 skb_copy_from_linear_data_offset(skb, pad,
1637 new_skb->data + pad, len);
1638 skb_reserve(new_skb, pad);
1639 skb_put(new_skb, len);
1640
1641 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1642
1643 skb = new_skb;
1644
a119a069
EG
1645 } else
1646 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
a2fbb9ea
ET
1647 pci_unmap_single(bp->pdev,
1648 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1649 bp->rx_buf_size,
a2fbb9ea
ET
1650 PCI_DMA_FROMDEVICE);
1651 skb_reserve(skb, pad);
1652 skb_put(skb, len);
1653
1654 } else {
1655 DP(NETIF_MSG_RX_ERR,
34f80b04 1656 "ERROR packet dropped because "
a2fbb9ea 1657 "of alloc failure\n");
de832a55 1658 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1659reuse_rx:
1660 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1661 goto next_rx;
1662 }
1663
1664 skb->protocol = eth_type_trans(skb, bp->dev);
1665
1666 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1667 if (bp->rx_csum) {
1adcd8be
EG
1668 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1669 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1670 else
de832a55 1671 fp->eth_q_stats.hw_csum_err++;
66e855f3 1672 }
a2fbb9ea
ET
1673 }
1674
748e5439 1675 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1676
a2fbb9ea 1677#ifdef BCM_VLAN
0c6671b0 1678 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1679 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1680 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1681 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1682 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1683 else
1684#endif
34f80b04 1685 netif_receive_skb(skb);
a2fbb9ea 1686
a2fbb9ea
ET
1687
1688next_rx:
1689 rx_buf->skb = NULL;
1690
1691 bd_cons = NEXT_RX_IDX(bd_cons);
1692 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1693 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1694 rx_pkt++;
a2fbb9ea
ET
1695next_cqe:
1696 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1697 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1698
34f80b04 1699 if (rx_pkt == budget)
a2fbb9ea
ET
1700 break;
1701 } /* while */
1702
1703 fp->rx_bd_cons = bd_cons;
34f80b04 1704 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1705 fp->rx_comp_cons = sw_comp_cons;
1706 fp->rx_comp_prod = sw_comp_prod;
1707
7a9b2557
VZ
1708 /* Update producers */
1709 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1710 fp->rx_sge_prod);
a2fbb9ea
ET
1711
1712 fp->rx_pkt += rx_pkt;
1713 fp->rx_calls++;
1714
1715 return rx_pkt;
1716}
1717
1718static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1719{
1720 struct bnx2x_fastpath *fp = fp_cookie;
1721 struct bnx2x *bp = fp->bp;
a2fbb9ea 1722
da5a662a
VZ
1723 /* Return here if interrupt is disabled */
1724 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1725 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1726 return IRQ_HANDLED;
1727 }
1728
34f80b04 1729 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1730 fp->index, fp->sb_id);
0626b899 1731 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1732
1733#ifdef BNX2X_STOP_ON_ERROR
1734 if (unlikely(bp->panic))
1735 return IRQ_HANDLED;
1736#endif
ca00392c
EG
1737 /* Handle Rx or Tx according to MSI-X vector */
1738 if (fp->is_rx_queue) {
1739 prefetch(fp->rx_cons_sb);
1740 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1741
ca00392c 1742 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1743
ca00392c
EG
1744 } else {
1745 prefetch(fp->tx_cons_sb);
1746 prefetch(&fp->status_blk->c_status_block.status_block_index);
1747
1748 bnx2x_update_fpsb_idx(fp);
1749 rmb();
1750 bnx2x_tx_int(fp);
1751
1752 /* Re-enable interrupts */
1753 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1754 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1755 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1756 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1757 }
34f80b04 1758
a2fbb9ea
ET
1759 return IRQ_HANDLED;
1760}
1761
1762static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1763{
555f6c78 1764 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1765 u16 status = bnx2x_ack_int(bp);
34f80b04 1766 u16 mask;
ca00392c 1767 int i;
a2fbb9ea 1768
34f80b04 1769 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1770 if (unlikely(status == 0)) {
1771 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1772 return IRQ_NONE;
1773 }
f5372251 1774 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1775
34f80b04 1776 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1777 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1778 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1779 return IRQ_HANDLED;
1780 }
1781
3196a88a
EG
1782#ifdef BNX2X_STOP_ON_ERROR
1783 if (unlikely(bp->panic))
1784 return IRQ_HANDLED;
1785#endif
1786
ca00392c
EG
1787 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1788 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1789
ca00392c
EG
1790 mask = 0x2 << fp->sb_id;
1791 if (status & mask) {
1792 /* Handle Rx or Tx according to SB id */
1793 if (fp->is_rx_queue) {
1794 prefetch(fp->rx_cons_sb);
1795 prefetch(&fp->status_blk->u_status_block.
1796 status_block_index);
a2fbb9ea 1797
ca00392c 1798 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1799
ca00392c
EG
1800 } else {
1801 prefetch(fp->tx_cons_sb);
1802 prefetch(&fp->status_blk->c_status_block.
1803 status_block_index);
1804
1805 bnx2x_update_fpsb_idx(fp);
1806 rmb();
1807 bnx2x_tx_int(fp);
1808
1809 /* Re-enable interrupts */
1810 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1811 le16_to_cpu(fp->fp_u_idx),
1812 IGU_INT_NOP, 1);
1813 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1814 le16_to_cpu(fp->fp_c_idx),
1815 IGU_INT_ENABLE, 1);
1816 }
1817 status &= ~mask;
1818 }
a2fbb9ea
ET
1819 }
1820
993ac7b5
MC
1821#ifdef BCM_CNIC
1822 mask = 0x2 << CNIC_SB_ID(bp);
1823 if (status & (mask | 0x1)) {
1824 struct cnic_ops *c_ops = NULL;
1825
1826 rcu_read_lock();
1827 c_ops = rcu_dereference(bp->cnic_ops);
1828 if (c_ops)
1829 c_ops->cnic_handler(bp->cnic_data, NULL);
1830 rcu_read_unlock();
1831
1832 status &= ~mask;
1833 }
1834#endif
a2fbb9ea 1835
34f80b04 1836 if (unlikely(status & 0x1)) {
1cf167f2 1837 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1838
1839 status &= ~0x1;
1840 if (!status)
1841 return IRQ_HANDLED;
1842 }
1843
34f80b04
EG
1844 if (status)
1845 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1846 status);
a2fbb9ea 1847
c18487ee 1848 return IRQ_HANDLED;
a2fbb9ea
ET
1849}
1850
c18487ee 1851/* end of fast path */
a2fbb9ea 1852
bb2a0f7a 1853static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1854
c18487ee
YR
1855/* Link */
1856
1857/*
1858 * General service functions
1859 */
a2fbb9ea 1860
4a37fb66 1861static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1862{
1863 u32 lock_status;
1864 u32 resource_bit = (1 << resource);
4a37fb66
YG
1865 int func = BP_FUNC(bp);
1866 u32 hw_lock_control_reg;
c18487ee 1867 int cnt;
a2fbb9ea 1868
c18487ee
YR
1869 /* Validating that the resource is within range */
1870 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1871 DP(NETIF_MSG_HW,
1872 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1873 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1874 return -EINVAL;
1875 }
a2fbb9ea 1876
4a37fb66
YG
1877 if (func <= 5) {
1878 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1879 } else {
1880 hw_lock_control_reg =
1881 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1882 }
1883
c18487ee 1884 /* Validating that the resource is not already taken */
4a37fb66 1885 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1886 if (lock_status & resource_bit) {
1887 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1888 lock_status, resource_bit);
1889 return -EEXIST;
1890 }
a2fbb9ea 1891
46230476
EG
1892 /* Try for 5 second every 5ms */
1893 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1894 /* Try to acquire the lock */
4a37fb66
YG
1895 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1896 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1897 if (lock_status & resource_bit)
1898 return 0;
a2fbb9ea 1899
c18487ee 1900 msleep(5);
a2fbb9ea 1901 }
c18487ee
YR
1902 DP(NETIF_MSG_HW, "Timeout\n");
1903 return -EAGAIN;
1904}
a2fbb9ea 1905
4a37fb66 1906static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1907{
1908 u32 lock_status;
1909 u32 resource_bit = (1 << resource);
4a37fb66
YG
1910 int func = BP_FUNC(bp);
1911 u32 hw_lock_control_reg;
a2fbb9ea 1912
c18487ee
YR
1913 /* Validating that the resource is within range */
1914 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1915 DP(NETIF_MSG_HW,
1916 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1917 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1918 return -EINVAL;
1919 }
1920
4a37fb66
YG
1921 if (func <= 5) {
1922 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1923 } else {
1924 hw_lock_control_reg =
1925 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1926 }
1927
c18487ee 1928 /* Validating that the resource is currently taken */
4a37fb66 1929 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1930 if (!(lock_status & resource_bit)) {
1931 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1932 lock_status, resource_bit);
1933 return -EFAULT;
a2fbb9ea
ET
1934 }
1935
4a37fb66 1936 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1937 return 0;
1938}
1939
1940/* HW Lock for shared dual port PHYs */
4a37fb66 1941static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1942{
34f80b04 1943 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1944
46c6a674
EG
1945 if (bp->port.need_hw_lock)
1946 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1947}
a2fbb9ea 1948
4a37fb66 1949static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1950{
46c6a674
EG
1951 if (bp->port.need_hw_lock)
1952 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1953
34f80b04 1954 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1955}
a2fbb9ea 1956
4acac6a5
EG
1957int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1958{
1959 /* The GPIO should be swapped if swap register is set and active */
1960 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1961 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1962 int gpio_shift = gpio_num +
1963 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1964 u32 gpio_mask = (1 << gpio_shift);
1965 u32 gpio_reg;
1966 int value;
1967
1968 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1969 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1970 return -EINVAL;
1971 }
1972
1973 /* read GPIO value */
1974 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1975
1976 /* get the requested pin value */
1977 if ((gpio_reg & gpio_mask) == gpio_mask)
1978 value = 1;
1979 else
1980 value = 0;
1981
1982 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1983
1984 return value;
1985}
1986
17de50b7 1987int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1988{
1989 /* The GPIO should be swapped if swap register is set and active */
1990 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1991 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1992 int gpio_shift = gpio_num +
1993 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1994 u32 gpio_mask = (1 << gpio_shift);
1995 u32 gpio_reg;
a2fbb9ea 1996
c18487ee
YR
1997 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1998 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1999 return -EINVAL;
2000 }
a2fbb9ea 2001
4a37fb66 2002 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2003 /* read GPIO and mask except the float bits */
2004 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2005
c18487ee
YR
2006 switch (mode) {
2007 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2008 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2009 gpio_num, gpio_shift);
2010 /* clear FLOAT and set CLR */
2011 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2013 break;
a2fbb9ea 2014
c18487ee
YR
2015 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2016 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2017 gpio_num, gpio_shift);
2018 /* clear FLOAT and set SET */
2019 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2021 break;
a2fbb9ea 2022
17de50b7 2023 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2024 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2025 gpio_num, gpio_shift);
2026 /* set FLOAT */
2027 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2028 break;
a2fbb9ea 2029
c18487ee
YR
2030 default:
2031 break;
a2fbb9ea
ET
2032 }
2033
c18487ee 2034 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2035 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2036
c18487ee 2037 return 0;
a2fbb9ea
ET
2038}
2039
4acac6a5
EG
2040int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2041{
2042 /* The GPIO should be swapped if swap register is set and active */
2043 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2044 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2045 int gpio_shift = gpio_num +
2046 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2047 u32 gpio_mask = (1 << gpio_shift);
2048 u32 gpio_reg;
2049
2050 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2051 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2052 return -EINVAL;
2053 }
2054
2055 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2056 /* read GPIO int */
2057 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2058
2059 switch (mode) {
2060 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2061 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2062 "output low\n", gpio_num, gpio_shift);
2063 /* clear SET and set CLR */
2064 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2066 break;
2067
2068 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2069 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2070 "output high\n", gpio_num, gpio_shift);
2071 /* clear CLR and set SET */
2072 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2073 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074 break;
2075
2076 default:
2077 break;
2078 }
2079
2080 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2081 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2082
2083 return 0;
2084}
2085
c18487ee 2086static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2087{
c18487ee
YR
2088 u32 spio_mask = (1 << spio_num);
2089 u32 spio_reg;
a2fbb9ea 2090
c18487ee
YR
2091 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2092 (spio_num > MISC_REGISTERS_SPIO_7)) {
2093 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2094 return -EINVAL;
a2fbb9ea
ET
2095 }
2096
4a37fb66 2097 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2098 /* read SPIO and mask except the float bits */
2099 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2100
c18487ee 2101 switch (mode) {
6378c025 2102 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2103 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2104 /* clear FLOAT and set CLR */
2105 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2106 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2107 break;
a2fbb9ea 2108
6378c025 2109 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2110 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2111 /* clear FLOAT and set SET */
2112 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2113 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2114 break;
a2fbb9ea 2115
c18487ee
YR
2116 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2117 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2118 /* set FLOAT */
2119 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2120 break;
a2fbb9ea 2121
c18487ee
YR
2122 default:
2123 break;
a2fbb9ea
ET
2124 }
2125
c18487ee 2126 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2128
a2fbb9ea
ET
2129 return 0;
2130}
2131
c18487ee 2132static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2133{
ad33ea3a
EG
2134 switch (bp->link_vars.ieee_fc &
2135 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2136 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2137 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2138 ADVERTISED_Pause);
2139 break;
356e2385 2140
c18487ee 2141 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2142 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2143 ADVERTISED_Pause);
2144 break;
356e2385 2145
c18487ee 2146 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2147 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2148 break;
356e2385 2149
c18487ee 2150 default:
34f80b04 2151 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2152 ADVERTISED_Pause);
2153 break;
2154 }
2155}
f1410647 2156
c18487ee
YR
2157static void bnx2x_link_report(struct bnx2x *bp)
2158{
f34d28ea 2159 if (bp->flags & MF_FUNC_DIS) {
2691d51d
EG
2160 netif_carrier_off(bp->dev);
2161 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2162 return;
2163 }
2164
c18487ee 2165 if (bp->link_vars.link_up) {
35c5f8fe
EG
2166 u16 line_speed;
2167
c18487ee
YR
2168 if (bp->state == BNX2X_STATE_OPEN)
2169 netif_carrier_on(bp->dev);
2170 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2171
35c5f8fe
EG
2172 line_speed = bp->link_vars.line_speed;
2173 if (IS_E1HMF(bp)) {
2174 u16 vn_max_rate;
2175
2176 vn_max_rate =
2177 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2178 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2179 if (vn_max_rate < line_speed)
2180 line_speed = vn_max_rate;
2181 }
2182 printk("%d Mbps ", line_speed);
f1410647 2183
c18487ee
YR
2184 if (bp->link_vars.duplex == DUPLEX_FULL)
2185 printk("full duplex");
2186 else
2187 printk("half duplex");
f1410647 2188
c0700f90
DM
2189 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2190 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2191 printk(", receive ");
356e2385
EG
2192 if (bp->link_vars.flow_ctrl &
2193 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2194 printk("& transmit ");
2195 } else {
2196 printk(", transmit ");
2197 }
2198 printk("flow control ON");
2199 }
2200 printk("\n");
f1410647 2201
c18487ee
YR
2202 } else { /* link_down */
2203 netif_carrier_off(bp->dev);
2204 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2205 }
c18487ee
YR
2206}
2207
b5bf9068 2208static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2209{
19680c48
EG
2210 if (!BP_NOMCP(bp)) {
2211 u8 rc;
a2fbb9ea 2212
19680c48 2213 /* Initialize link parameters structure variables */
8c99e7b0
YR
2214 /* It is recommended to turn off RX FC for jumbo frames
2215 for better performance */
0c593270 2216 if (bp->dev->mtu > 5000)
c0700f90 2217 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2218 else
c0700f90 2219 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2220
4a37fb66 2221 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2222
2223 if (load_mode == LOAD_DIAG)
2224 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2225
19680c48 2226 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2227
4a37fb66 2228 bnx2x_release_phy_lock(bp);
a2fbb9ea 2229
3c96c68b
EG
2230 bnx2x_calc_fc_adv(bp);
2231
b5bf9068
EG
2232 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2233 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2234 bnx2x_link_report(bp);
b5bf9068 2235 }
34f80b04 2236
19680c48
EG
2237 return rc;
2238 }
f5372251 2239 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2240 return -EINVAL;
a2fbb9ea
ET
2241}
2242
c18487ee 2243static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2244{
19680c48 2245 if (!BP_NOMCP(bp)) {
4a37fb66 2246 bnx2x_acquire_phy_lock(bp);
19680c48 2247 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2248 bnx2x_release_phy_lock(bp);
a2fbb9ea 2249
19680c48
EG
2250 bnx2x_calc_fc_adv(bp);
2251 } else
f5372251 2252 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2253}
a2fbb9ea 2254
c18487ee
YR
2255static void bnx2x__link_reset(struct bnx2x *bp)
2256{
19680c48 2257 if (!BP_NOMCP(bp)) {
4a37fb66 2258 bnx2x_acquire_phy_lock(bp);
589abe3a 2259 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2260 bnx2x_release_phy_lock(bp);
19680c48 2261 } else
f5372251 2262 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2263}
a2fbb9ea 2264
c18487ee
YR
2265static u8 bnx2x_link_test(struct bnx2x *bp)
2266{
2267 u8 rc;
a2fbb9ea 2268
4a37fb66 2269 bnx2x_acquire_phy_lock(bp);
c18487ee 2270 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2271 bnx2x_release_phy_lock(bp);
a2fbb9ea 2272
c18487ee
YR
2273 return rc;
2274}
a2fbb9ea 2275
8a1c38d1 2276static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2277{
8a1c38d1
EG
2278 u32 r_param = bp->link_vars.line_speed / 8;
2279 u32 fair_periodic_timeout_usec;
2280 u32 t_fair;
34f80b04 2281
8a1c38d1
EG
2282 memset(&(bp->cmng.rs_vars), 0,
2283 sizeof(struct rate_shaping_vars_per_port));
2284 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2285
8a1c38d1
EG
2286 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2287 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2288
8a1c38d1
EG
2289 /* this is the threshold below which no timer arming will occur
2290 1.25 coefficient is for the threshold to be a little bigger
2291 than the real time, to compensate for timer in-accuracy */
2292 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2293 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2294
8a1c38d1
EG
2295 /* resolution of fairness timer */
2296 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2297 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2298 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2299
8a1c38d1
EG
2300 /* this is the threshold below which we won't arm the timer anymore */
2301 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2302
8a1c38d1
EG
2303 /* we multiply by 1e3/8 to get bytes/msec.
2304 We don't want the credits to pass a credit
2305 of the t_fair*FAIR_MEM (algorithm resolution) */
2306 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2307 /* since each tick is 4 usec */
2308 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2309}
2310
2691d51d
EG
2311/* Calculates the sum of vn_min_rates.
2312 It's needed for further normalizing of the min_rates.
2313 Returns:
2314 sum of vn_min_rates.
2315 or
2316 0 - if all the min_rates are 0.
2317 In the later case fainess algorithm should be deactivated.
2318 If not all min_rates are zero then those that are zeroes will be set to 1.
2319 */
2320static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2321{
2322 int all_zero = 1;
2323 int port = BP_PORT(bp);
2324 int vn;
2325
2326 bp->vn_weight_sum = 0;
2327 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2328 int func = 2*vn + port;
2329 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2330 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2331 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2332
2333 /* Skip hidden vns */
2334 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2335 continue;
2336
2337 /* If min rate is zero - set it to 1 */
2338 if (!vn_min_rate)
2339 vn_min_rate = DEF_MIN_RATE;
2340 else
2341 all_zero = 0;
2342
2343 bp->vn_weight_sum += vn_min_rate;
2344 }
2345
2346 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2347 if (all_zero) {
2348 bp->cmng.flags.cmng_enables &=
2349 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2350 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2351 " fairness will be disabled\n");
2352 } else
2353 bp->cmng.flags.cmng_enables |=
2354 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2355}
2356
8a1c38d1 2357static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2358{
2359 struct rate_shaping_vars_per_vn m_rs_vn;
2360 struct fairness_vars_per_vn m_fair_vn;
2361 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2362 u16 vn_min_rate, vn_max_rate;
2363 int i;
2364
2365 /* If function is hidden - set min and max to zeroes */
2366 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2367 vn_min_rate = 0;
2368 vn_max_rate = 0;
2369
2370 } else {
2371 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2372 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2373 /* If min rate is zero - set it to 1 */
2374 if (!vn_min_rate)
34f80b04
EG
2375 vn_min_rate = DEF_MIN_RATE;
2376 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2377 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2378 }
8a1c38d1 2379 DP(NETIF_MSG_IFUP,
b015e3d1 2380 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2381 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2382
2383 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2384 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2385
2386 /* global vn counter - maximal Mbps for this vn */
2387 m_rs_vn.vn_counter.rate = vn_max_rate;
2388
2389 /* quota - number of bytes transmitted in this period */
2390 m_rs_vn.vn_counter.quota =
2391 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2392
8a1c38d1 2393 if (bp->vn_weight_sum) {
34f80b04
EG
2394 /* credit for each period of the fairness algorithm:
2395 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2396 vn_weight_sum should not be larger than 10000, thus
2397 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2398 than zero */
34f80b04 2399 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2400 max((u32)(vn_min_rate * (T_FAIR_COEF /
2401 (8 * bp->vn_weight_sum))),
2402 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2403 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2404 m_fair_vn.vn_credit_delta);
2405 }
2406
34f80b04
EG
2407 /* Store it to internal memory */
2408 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2409 REG_WR(bp, BAR_XSTRORM_INTMEM +
2410 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2411 ((u32 *)(&m_rs_vn))[i]);
2412
2413 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2414 REG_WR(bp, BAR_XSTRORM_INTMEM +
2415 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2416 ((u32 *)(&m_fair_vn))[i]);
2417}
2418
8a1c38d1 2419
c18487ee
YR
2420/* This function is called upon link interrupt */
2421static void bnx2x_link_attn(struct bnx2x *bp)
2422{
bb2a0f7a
YG
2423 /* Make sure that we are synced with the current statistics */
2424 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2425
c18487ee 2426 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2427
bb2a0f7a
YG
2428 if (bp->link_vars.link_up) {
2429
1c06328c 2430 /* dropless flow control */
a18f5128 2431 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2432 int port = BP_PORT(bp);
2433 u32 pause_enabled = 0;
2434
2435 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2436 pause_enabled = 1;
2437
2438 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2439 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2440 pause_enabled);
2441 }
2442
bb2a0f7a
YG
2443 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2444 struct host_port_stats *pstats;
2445
2446 pstats = bnx2x_sp(bp, port_stats);
2447 /* reset old bmac stats */
2448 memset(&(pstats->mac_stx[0]), 0,
2449 sizeof(struct mac_stx));
2450 }
f34d28ea 2451 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2452 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2453 }
2454
c18487ee
YR
2455 /* indicate link status */
2456 bnx2x_link_report(bp);
34f80b04
EG
2457
2458 if (IS_E1HMF(bp)) {
8a1c38d1 2459 int port = BP_PORT(bp);
34f80b04 2460 int func;
8a1c38d1 2461 int vn;
34f80b04 2462
ab6ad5a4 2463 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2464 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2465 if (vn == BP_E1HVN(bp))
2466 continue;
2467
8a1c38d1 2468 func = ((vn << 1) | port);
34f80b04
EG
2469 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2470 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2471 }
34f80b04 2472
8a1c38d1
EG
2473 if (bp->link_vars.link_up) {
2474 int i;
2475
2476 /* Init rate shaping and fairness contexts */
2477 bnx2x_init_port_minmax(bp);
34f80b04 2478
34f80b04 2479 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2480 bnx2x_init_vn_minmax(bp, 2*vn + port);
2481
2482 /* Store it to internal memory */
2483 for (i = 0;
2484 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2485 REG_WR(bp, BAR_XSTRORM_INTMEM +
2486 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2487 ((u32 *)(&bp->cmng))[i]);
2488 }
34f80b04 2489 }
c18487ee 2490}
a2fbb9ea 2491
c18487ee
YR
2492static void bnx2x__link_status_update(struct bnx2x *bp)
2493{
f34d28ea 2494 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2495 return;
a2fbb9ea 2496
c18487ee 2497 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2498
bb2a0f7a
YG
2499 if (bp->link_vars.link_up)
2500 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2501 else
2502 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2503
2691d51d
EG
2504 bnx2x_calc_vn_weight_sum(bp);
2505
c18487ee
YR
2506 /* indicate link status */
2507 bnx2x_link_report(bp);
a2fbb9ea 2508}
a2fbb9ea 2509
34f80b04
EG
2510static void bnx2x_pmf_update(struct bnx2x *bp)
2511{
2512 int port = BP_PORT(bp);
2513 u32 val;
2514
2515 bp->port.pmf = 1;
2516 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2517
2518 /* enable nig attention */
2519 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2520 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2521 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2522
2523 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2524}
2525
c18487ee 2526/* end of Link */
a2fbb9ea
ET
2527
2528/* slow path */
2529
2530/*
2531 * General service functions
2532 */
2533
2691d51d
EG
2534/* send the MCP a request, block until there is a reply */
2535u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2536{
2537 int func = BP_FUNC(bp);
2538 u32 seq = ++bp->fw_seq;
2539 u32 rc = 0;
2540 u32 cnt = 1;
2541 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2542
c4ff7cbf 2543 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2544 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2545 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2546
2547 do {
2548 /* let the FW do it's magic ... */
2549 msleep(delay);
2550
2551 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2552
c4ff7cbf
EG
2553 /* Give the FW up to 5 second (500*10ms) */
2554 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2555
2556 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2557 cnt*delay, rc, seq);
2558
2559 /* is this a reply to our command? */
2560 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2561 rc &= FW_MSG_CODE_MASK;
2562 else {
2563 /* FW BUG! */
2564 BNX2X_ERR("FW failed to respond!\n");
2565 bnx2x_fw_dump(bp);
2566 rc = 0;
2567 }
c4ff7cbf 2568 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2569
2570 return rc;
2571}
2572
2573static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2574static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2575static void bnx2x_set_rx_mode(struct net_device *dev);
2576
2577static void bnx2x_e1h_disable(struct bnx2x *bp)
2578{
2579 int port = BP_PORT(bp);
2691d51d
EG
2580
2581 netif_tx_disable(bp->dev);
2582 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2583
2584 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2585
2691d51d
EG
2586 netif_carrier_off(bp->dev);
2587}
2588
2589static void bnx2x_e1h_enable(struct bnx2x *bp)
2590{
2591 int port = BP_PORT(bp);
2592
2593 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2594
2691d51d
EG
2595 /* Tx queue should be only reenabled */
2596 netif_tx_wake_all_queues(bp->dev);
2597
061bc702
EG
2598 /*
2599 * Should not call netif_carrier_on since it will be called if the link
2600 * is up when checking for link state
2601 */
2691d51d
EG
2602}
2603
2604static void bnx2x_update_min_max(struct bnx2x *bp)
2605{
2606 int port = BP_PORT(bp);
2607 int vn, i;
2608
2609 /* Init rate shaping and fairness contexts */
2610 bnx2x_init_port_minmax(bp);
2611
2612 bnx2x_calc_vn_weight_sum(bp);
2613
2614 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2615 bnx2x_init_vn_minmax(bp, 2*vn + port);
2616
2617 if (bp->port.pmf) {
2618 int func;
2619
2620 /* Set the attention towards other drivers on the same port */
2621 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2622 if (vn == BP_E1HVN(bp))
2623 continue;
2624
2625 func = ((vn << 1) | port);
2626 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2627 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2628 }
2629
2630 /* Store it to internal memory */
2631 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2632 REG_WR(bp, BAR_XSTRORM_INTMEM +
2633 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2634 ((u32 *)(&bp->cmng))[i]);
2635 }
2636}
2637
2638static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2639{
2691d51d 2640 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2641
2642 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2643
f34d28ea
EG
2644 /*
2645 * This is the only place besides the function initialization
2646 * where the bp->flags can change so it is done without any
2647 * locks
2648 */
2691d51d
EG
2649 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2650 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2651 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2652
2653 bnx2x_e1h_disable(bp);
2654 } else {
2655 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2656 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2657
2658 bnx2x_e1h_enable(bp);
2659 }
2660 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2661 }
2662 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2663
2664 bnx2x_update_min_max(bp);
2665 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2666 }
2667
2668 /* Report results to MCP */
2669 if (dcc_event)
2670 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2671 else
2672 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2673}
2674
28912902
MC
2675/* must be called under the spq lock */
2676static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2677{
2678 struct eth_spe *next_spe = bp->spq_prod_bd;
2679
2680 if (bp->spq_prod_bd == bp->spq_last_bd) {
2681 bp->spq_prod_bd = bp->spq;
2682 bp->spq_prod_idx = 0;
2683 DP(NETIF_MSG_TIMER, "end of spq\n");
2684 } else {
2685 bp->spq_prod_bd++;
2686 bp->spq_prod_idx++;
2687 }
2688 return next_spe;
2689}
2690
2691/* must be called under the spq lock */
2692static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2693{
2694 int func = BP_FUNC(bp);
2695
2696 /* Make sure that BD data is updated before writing the producer */
2697 wmb();
2698
2699 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2700 bp->spq_prod_idx);
2701 mmiowb();
2702}
2703
a2fbb9ea
ET
2704/* the slow path queue is odd since completions arrive on the fastpath ring */
2705static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2706 u32 data_hi, u32 data_lo, int common)
2707{
28912902 2708 struct eth_spe *spe;
a2fbb9ea 2709
34f80b04
EG
2710 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2711 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2712 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2713 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2714 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2715
2716#ifdef BNX2X_STOP_ON_ERROR
2717 if (unlikely(bp->panic))
2718 return -EIO;
2719#endif
2720
34f80b04 2721 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2722
2723 if (!bp->spq_left) {
2724 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2725 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2726 bnx2x_panic();
2727 return -EBUSY;
2728 }
f1410647 2729
28912902
MC
2730 spe = bnx2x_sp_get_next(bp);
2731
a2fbb9ea 2732 /* CID needs port number to be encoded int it */
28912902 2733 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2734 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2735 HW_CID(bp, cid)));
28912902 2736 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2737 if (common)
28912902 2738 spe->hdr.type |=
a2fbb9ea
ET
2739 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2740
28912902
MC
2741 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2742 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2743
2744 bp->spq_left--;
2745
28912902 2746 bnx2x_sp_prod_update(bp);
34f80b04 2747 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2748 return 0;
2749}
2750
2751/* acquire split MCP access lock register */
4a37fb66 2752static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2753{
a2fbb9ea 2754 u32 i, j, val;
34f80b04 2755 int rc = 0;
a2fbb9ea
ET
2756
2757 might_sleep();
2758 i = 100;
2759 for (j = 0; j < i*10; j++) {
2760 val = (1UL << 31);
2761 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2762 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2763 if (val & (1L << 31))
2764 break;
2765
2766 msleep(5);
2767 }
a2fbb9ea 2768 if (!(val & (1L << 31))) {
19680c48 2769 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2770 rc = -EBUSY;
2771 }
2772
2773 return rc;
2774}
2775
4a37fb66
YG
2776/* release split MCP access lock register */
2777static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2778{
2779 u32 val = 0;
2780
2781 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2782}
2783
2784static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2785{
2786 struct host_def_status_block *def_sb = bp->def_status_blk;
2787 u16 rc = 0;
2788
2789 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2790 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2791 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2792 rc |= 1;
2793 }
2794 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2795 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2796 rc |= 2;
2797 }
2798 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2799 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2800 rc |= 4;
2801 }
2802 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2803 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2804 rc |= 8;
2805 }
2806 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2807 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2808 rc |= 16;
2809 }
2810 return rc;
2811}
2812
2813/*
2814 * slow path service functions
2815 */
2816
2817static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2818{
34f80b04 2819 int port = BP_PORT(bp);
5c862848
EG
2820 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2821 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2822 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2823 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2824 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2825 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2826 u32 aeu_mask;
87942b46 2827 u32 nig_mask = 0;
a2fbb9ea 2828
a2fbb9ea
ET
2829 if (bp->attn_state & asserted)
2830 BNX2X_ERR("IGU ERROR\n");
2831
3fcaf2e5
EG
2832 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2833 aeu_mask = REG_RD(bp, aeu_addr);
2834
a2fbb9ea 2835 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2836 aeu_mask, asserted);
2837 aeu_mask &= ~(asserted & 0xff);
2838 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2839
3fcaf2e5
EG
2840 REG_WR(bp, aeu_addr, aeu_mask);
2841 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2842
3fcaf2e5 2843 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2844 bp->attn_state |= asserted;
3fcaf2e5 2845 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2846
2847 if (asserted & ATTN_HARD_WIRED_MASK) {
2848 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2849
a5e9a7cf
EG
2850 bnx2x_acquire_phy_lock(bp);
2851
877e9aa4 2852 /* save nig interrupt mask */
87942b46 2853 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2854 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2855
c18487ee 2856 bnx2x_link_attn(bp);
a2fbb9ea
ET
2857
2858 /* handle unicore attn? */
2859 }
2860 if (asserted & ATTN_SW_TIMER_4_FUNC)
2861 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2862
2863 if (asserted & GPIO_2_FUNC)
2864 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2865
2866 if (asserted & GPIO_3_FUNC)
2867 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2868
2869 if (asserted & GPIO_4_FUNC)
2870 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2871
2872 if (port == 0) {
2873 if (asserted & ATTN_GENERAL_ATTN_1) {
2874 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2875 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2876 }
2877 if (asserted & ATTN_GENERAL_ATTN_2) {
2878 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2879 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2880 }
2881 if (asserted & ATTN_GENERAL_ATTN_3) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2884 }
2885 } else {
2886 if (asserted & ATTN_GENERAL_ATTN_4) {
2887 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2888 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2889 }
2890 if (asserted & ATTN_GENERAL_ATTN_5) {
2891 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2892 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2893 }
2894 if (asserted & ATTN_GENERAL_ATTN_6) {
2895 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2897 }
2898 }
2899
2900 } /* if hardwired */
2901
5c862848
EG
2902 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2903 asserted, hc_addr);
2904 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2905
2906 /* now set back the mask */
a5e9a7cf 2907 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2908 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2909 bnx2x_release_phy_lock(bp);
2910 }
a2fbb9ea
ET
2911}
2912
fd4ef40d
EG
2913static inline void bnx2x_fan_failure(struct bnx2x *bp)
2914{
2915 int port = BP_PORT(bp);
2916
2917 /* mark the failure */
2918 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2919 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2920 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2921 bp->link_params.ext_phy_config);
2922
2923 /* log the failure */
2924 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2925 " the driver to shutdown the card to prevent permanent"
2926 " damage. Please contact Dell Support for assistance\n",
2927 bp->dev->name);
2928}
ab6ad5a4 2929
877e9aa4 2930static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2931{
34f80b04 2932 int port = BP_PORT(bp);
877e9aa4 2933 int reg_offset;
4d295db0 2934 u32 val, swap_val, swap_override;
877e9aa4 2935
34f80b04
EG
2936 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2937 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2938
34f80b04 2939 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2940
2941 val = REG_RD(bp, reg_offset);
2942 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2943 REG_WR(bp, reg_offset, val);
2944
2945 BNX2X_ERR("SPIO5 hw attention\n");
2946
fd4ef40d 2947 /* Fan failure attention */
35b19ba5
EG
2948 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2949 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2950 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2951 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2952 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2953 /* The PHY reset is controlled by GPIO 1 */
2954 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2955 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2956 break;
2957
4d295db0
EG
2958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2959 /* The PHY reset is controlled by GPIO 1 */
2960 /* fake the port number to cancel the swap done in
2961 set_gpio() */
2962 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2963 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2964 port = (swap_val && swap_override) ^ 1;
2965 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2966 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2967 break;
2968
877e9aa4
ET
2969 default:
2970 break;
2971 }
fd4ef40d 2972 bnx2x_fan_failure(bp);
877e9aa4 2973 }
34f80b04 2974
589abe3a
EG
2975 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2977 bnx2x_acquire_phy_lock(bp);
2978 bnx2x_handle_module_detect_int(&bp->link_params);
2979 bnx2x_release_phy_lock(bp);
2980 }
2981
34f80b04
EG
2982 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2989 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2990 bnx2x_panic();
2991 }
877e9aa4
ET
2992}
2993
2994static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2995{
2996 u32 val;
2997
0626b899 2998 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2999
3000 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3001 BNX2X_ERR("DB hw attention 0x%x\n", val);
3002 /* DORQ discard attention */
3003 if (val & 0x2)
3004 BNX2X_ERR("FATAL error from DORQ\n");
3005 }
34f80b04
EG
3006
3007 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3008
3009 int port = BP_PORT(bp);
3010 int reg_offset;
3011
3012 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3013 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3014
3015 val = REG_RD(bp, reg_offset);
3016 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3017 REG_WR(bp, reg_offset, val);
3018
3019 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3020 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3021 bnx2x_panic();
3022 }
877e9aa4
ET
3023}
3024
3025static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3026{
3027 u32 val;
3028
3029 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3030
3031 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3032 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3033 /* CFC error attention */
3034 if (val & 0x2)
3035 BNX2X_ERR("FATAL error from CFC\n");
3036 }
3037
3038 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3039
3040 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3041 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3042 /* RQ_USDMDP_FIFO_OVERFLOW */
3043 if (val & 0x18000)
3044 BNX2X_ERR("FATAL error from PXP\n");
3045 }
34f80b04
EG
3046
3047 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3048
3049 int port = BP_PORT(bp);
3050 int reg_offset;
3051
3052 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3053 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3054
3055 val = REG_RD(bp, reg_offset);
3056 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3057 REG_WR(bp, reg_offset, val);
3058
3059 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3060 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3061 bnx2x_panic();
3062 }
877e9aa4
ET
3063}
3064
3065static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3066{
34f80b04
EG
3067 u32 val;
3068
877e9aa4
ET
3069 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3070
34f80b04
EG
3071 if (attn & BNX2X_PMF_LINK_ASSERT) {
3072 int func = BP_FUNC(bp);
3073
3074 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3075 bp->mf_config = SHMEM_RD(bp,
3076 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3077 val = SHMEM_RD(bp, func_mb[func].drv_status);
3078 if (val & DRV_STATUS_DCC_EVENT_MASK)
3079 bnx2x_dcc_event(bp,
3080 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3081 bnx2x__link_status_update(bp);
2691d51d 3082 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3083 bnx2x_pmf_update(bp);
3084
3085 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3086
3087 BNX2X_ERR("MC assert!\n");
3088 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3089 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3091 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3092 bnx2x_panic();
3093
3094 } else if (attn & BNX2X_MCP_ASSERT) {
3095
3096 BNX2X_ERR("MCP assert!\n");
3097 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3098 bnx2x_fw_dump(bp);
877e9aa4
ET
3099
3100 } else
3101 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3102 }
3103
3104 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3105 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3106 if (attn & BNX2X_GRC_TIMEOUT) {
3107 val = CHIP_IS_E1H(bp) ?
3108 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3109 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3110 }
3111 if (attn & BNX2X_GRC_RSV) {
3112 val = CHIP_IS_E1H(bp) ?
3113 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3114 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3115 }
877e9aa4 3116 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3117 }
3118}
3119
3120static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3121{
a2fbb9ea
ET
3122 struct attn_route attn;
3123 struct attn_route group_mask;
34f80b04 3124 int port = BP_PORT(bp);
877e9aa4 3125 int index;
a2fbb9ea
ET
3126 u32 reg_addr;
3127 u32 val;
3fcaf2e5 3128 u32 aeu_mask;
a2fbb9ea
ET
3129
3130 /* need to take HW lock because MCP or other port might also
3131 try to handle this event */
4a37fb66 3132 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3133
3134 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3135 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3136 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3137 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3138 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3139 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3140
3141 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3142 if (deasserted & (1 << index)) {
3143 group_mask = bp->attn_group[index];
3144
34f80b04
EG
3145 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3146 index, group_mask.sig[0], group_mask.sig[1],
3147 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3148
877e9aa4
ET
3149 bnx2x_attn_int_deasserted3(bp,
3150 attn.sig[3] & group_mask.sig[3]);
3151 bnx2x_attn_int_deasserted1(bp,
3152 attn.sig[1] & group_mask.sig[1]);
3153 bnx2x_attn_int_deasserted2(bp,
3154 attn.sig[2] & group_mask.sig[2]);
3155 bnx2x_attn_int_deasserted0(bp,
3156 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3157
a2fbb9ea
ET
3158 if ((attn.sig[0] & group_mask.sig[0] &
3159 HW_PRTY_ASSERT_SET_0) ||
3160 (attn.sig[1] & group_mask.sig[1] &
3161 HW_PRTY_ASSERT_SET_1) ||
3162 (attn.sig[2] & group_mask.sig[2] &
3163 HW_PRTY_ASSERT_SET_2))
6378c025 3164 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3165 }
3166 }
3167
4a37fb66 3168 bnx2x_release_alr(bp);
a2fbb9ea 3169
5c862848 3170 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3171
3172 val = ~deasserted;
3fcaf2e5
EG
3173 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3174 val, reg_addr);
5c862848 3175 REG_WR(bp, reg_addr, val);
a2fbb9ea 3176
a2fbb9ea 3177 if (~bp->attn_state & deasserted)
3fcaf2e5 3178 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3179
3180 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3181 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3182
3fcaf2e5
EG
3183 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3184 aeu_mask = REG_RD(bp, reg_addr);
3185
3186 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3187 aeu_mask, deasserted);
3188 aeu_mask |= (deasserted & 0xff);
3189 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3190
3fcaf2e5
EG
3191 REG_WR(bp, reg_addr, aeu_mask);
3192 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3193
3194 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3195 bp->attn_state &= ~deasserted;
3196 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3197}
3198
3199static void bnx2x_attn_int(struct bnx2x *bp)
3200{
3201 /* read local copy of bits */
68d59484
EG
3202 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3203 attn_bits);
3204 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3205 attn_bits_ack);
a2fbb9ea
ET
3206 u32 attn_state = bp->attn_state;
3207
3208 /* look for changed bits */
3209 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3210 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3211
3212 DP(NETIF_MSG_HW,
3213 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3214 attn_bits, attn_ack, asserted, deasserted);
3215
3216 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3217 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3218
3219 /* handle bits that were raised */
3220 if (asserted)
3221 bnx2x_attn_int_asserted(bp, asserted);
3222
3223 if (deasserted)
3224 bnx2x_attn_int_deasserted(bp, deasserted);
3225}
3226
3227static void bnx2x_sp_task(struct work_struct *work)
3228{
1cf167f2 3229 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3230 u16 status;
3231
34f80b04 3232
a2fbb9ea
ET
3233 /* Return here if interrupt is disabled */
3234 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3235 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3236 return;
3237 }
3238
3239 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3240/* if (status == 0) */
3241/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3242
3196a88a 3243 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3244
877e9aa4
ET
3245 /* HW attentions */
3246 if (status & 0x1)
a2fbb9ea 3247 bnx2x_attn_int(bp);
a2fbb9ea 3248
68d59484 3249 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3250 IGU_INT_NOP, 1);
3251 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3252 IGU_INT_NOP, 1);
3253 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3254 IGU_INT_NOP, 1);
3255 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3256 IGU_INT_NOP, 1);
3257 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3258 IGU_INT_ENABLE, 1);
877e9aa4 3259
a2fbb9ea
ET
3260}
3261
3262static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3263{
3264 struct net_device *dev = dev_instance;
3265 struct bnx2x *bp = netdev_priv(dev);
3266
3267 /* Return here if interrupt is disabled */
3268 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3269 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3270 return IRQ_HANDLED;
3271 }
3272
8d9c5f34 3273 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3274
3275#ifdef BNX2X_STOP_ON_ERROR
3276 if (unlikely(bp->panic))
3277 return IRQ_HANDLED;
3278#endif
3279
993ac7b5
MC
3280#ifdef BCM_CNIC
3281 {
3282 struct cnic_ops *c_ops;
3283
3284 rcu_read_lock();
3285 c_ops = rcu_dereference(bp->cnic_ops);
3286 if (c_ops)
3287 c_ops->cnic_handler(bp->cnic_data, NULL);
3288 rcu_read_unlock();
3289 }
3290#endif
1cf167f2 3291 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3292
3293 return IRQ_HANDLED;
3294}
3295
3296/* end of slow path */
3297
3298/* Statistics */
3299
3300/****************************************************************************
3301* Macros
3302****************************************************************************/
3303
a2fbb9ea
ET
3304/* sum[hi:lo] += add[hi:lo] */
3305#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3306 do { \
3307 s_lo += a_lo; \
f5ba6772 3308 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3309 } while (0)
3310
3311/* difference = minuend - subtrahend */
3312#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3313 do { \
bb2a0f7a
YG
3314 if (m_lo < s_lo) { \
3315 /* underflow */ \
a2fbb9ea 3316 d_hi = m_hi - s_hi; \
bb2a0f7a 3317 if (d_hi > 0) { \
6378c025 3318 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3319 d_hi--; \
3320 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3321 } else { \
6378c025 3322 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3323 d_hi = 0; \
3324 d_lo = 0; \
3325 } \
bb2a0f7a
YG
3326 } else { \
3327 /* m_lo >= s_lo */ \
a2fbb9ea 3328 if (m_hi < s_hi) { \
bb2a0f7a
YG
3329 d_hi = 0; \
3330 d_lo = 0; \
3331 } else { \
6378c025 3332 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3333 d_hi = m_hi - s_hi; \
3334 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3335 } \
3336 } \
3337 } while (0)
3338
bb2a0f7a 3339#define UPDATE_STAT64(s, t) \
a2fbb9ea 3340 do { \
bb2a0f7a
YG
3341 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3342 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3343 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3344 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3345 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3346 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3347 } while (0)
3348
bb2a0f7a 3349#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3350 do { \
bb2a0f7a
YG
3351 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3352 diff.lo, new->s##_lo, old->s##_lo); \
3353 ADD_64(estats->t##_hi, diff.hi, \
3354 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3355 } while (0)
3356
3357/* sum[hi:lo] += add */
3358#define ADD_EXTEND_64(s_hi, s_lo, a) \
3359 do { \
3360 s_lo += a; \
3361 s_hi += (s_lo < a) ? 1 : 0; \
3362 } while (0)
3363
bb2a0f7a 3364#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3365 do { \
bb2a0f7a
YG
3366 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3367 pstats->mac_stx[1].s##_lo, \
3368 new->s); \
a2fbb9ea
ET
3369 } while (0)
3370
bb2a0f7a 3371#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3372 do { \
4781bfad
EG
3373 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3374 old_tclient->s = tclient->s; \
de832a55
EG
3375 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3376 } while (0)
3377
3378#define UPDATE_EXTEND_USTAT(s, t) \
3379 do { \
3380 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3381 old_uclient->s = uclient->s; \
3382 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3383 } while (0)
3384
3385#define UPDATE_EXTEND_XSTAT(s, t) \
3386 do { \
4781bfad
EG
3387 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3388 old_xclient->s = xclient->s; \
de832a55
EG
3389 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3390 } while (0)
3391
3392/* minuend -= subtrahend */
3393#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3394 do { \
3395 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3396 } while (0)
3397
3398/* minuend[hi:lo] -= subtrahend */
3399#define SUB_EXTEND_64(m_hi, m_lo, s) \
3400 do { \
3401 SUB_64(m_hi, 0, m_lo, s); \
3402 } while (0)
3403
3404#define SUB_EXTEND_USTAT(s, t) \
3405 do { \
3406 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3407 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3408 } while (0)
3409
3410/*
3411 * General service functions
3412 */
3413
3414static inline long bnx2x_hilo(u32 *hiref)
3415{
3416 u32 lo = *(hiref + 1);
3417#if (BITS_PER_LONG == 64)
3418 u32 hi = *hiref;
3419
3420 return HILO_U64(hi, lo);
3421#else
3422 return lo;
3423#endif
3424}
3425
3426/*
3427 * Init service functions
3428 */
3429
bb2a0f7a
YG
3430static void bnx2x_storm_stats_post(struct bnx2x *bp)
3431{
3432 if (!bp->stats_pending) {
3433 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3434 int i, rc;
bb2a0f7a
YG
3435
3436 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3437 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3438 for_each_queue(bp, i)
3439 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3440
3441 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3442 ((u32 *)&ramrod_data)[1],
3443 ((u32 *)&ramrod_data)[0], 0);
3444 if (rc == 0) {
3445 /* stats ramrod has it's own slot on the spq */
3446 bp->spq_left++;
3447 bp->stats_pending = 1;
3448 }
3449 }
3450}
3451
bb2a0f7a
YG
3452static void bnx2x_hw_stats_post(struct bnx2x *bp)
3453{
3454 struct dmae_command *dmae = &bp->stats_dmae;
3455 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3456
3457 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3458 if (CHIP_REV_IS_SLOW(bp))
3459 return;
bb2a0f7a
YG
3460
3461 /* loader */
3462 if (bp->executer_idx) {
3463 int loader_idx = PMF_DMAE_C(bp);
3464
3465 memset(dmae, 0, sizeof(struct dmae_command));
3466
3467 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3468 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3469 DMAE_CMD_DST_RESET |
3470#ifdef __BIG_ENDIAN
3471 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3472#else
3473 DMAE_CMD_ENDIANITY_DW_SWAP |
3474#endif
3475 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3476 DMAE_CMD_PORT_0) |
3477 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3478 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3479 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3480 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3481 sizeof(struct dmae_command) *
3482 (loader_idx + 1)) >> 2;
3483 dmae->dst_addr_hi = 0;
3484 dmae->len = sizeof(struct dmae_command) >> 2;
3485 if (CHIP_IS_E1(bp))
3486 dmae->len--;
3487 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3488 dmae->comp_addr_hi = 0;
3489 dmae->comp_val = 1;
3490
3491 *stats_comp = 0;
3492 bnx2x_post_dmae(bp, dmae, loader_idx);
3493
3494 } else if (bp->func_stx) {
3495 *stats_comp = 0;
3496 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3497 }
3498}
3499
3500static int bnx2x_stats_comp(struct bnx2x *bp)
3501{
3502 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3503 int cnt = 10;
3504
3505 might_sleep();
3506 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3507 if (!cnt) {
3508 BNX2X_ERR("timeout waiting for stats finished\n");
3509 break;
3510 }
3511 cnt--;
12469401 3512 msleep(1);
bb2a0f7a
YG
3513 }
3514 return 1;
3515}
3516
3517/*
3518 * Statistics service functions
3519 */
3520
3521static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3522{
3523 struct dmae_command *dmae;
3524 u32 opcode;
3525 int loader_idx = PMF_DMAE_C(bp);
3526 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3527
3528 /* sanity */
3529 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3530 BNX2X_ERR("BUG!\n");
3531 return;
3532 }
3533
3534 bp->executer_idx = 0;
3535
3536 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3537 DMAE_CMD_C_ENABLE |
3538 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3539#ifdef __BIG_ENDIAN
3540 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3541#else
3542 DMAE_CMD_ENDIANITY_DW_SWAP |
3543#endif
3544 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3545 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3546
3547 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3548 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3549 dmae->src_addr_lo = bp->port.port_stx >> 2;
3550 dmae->src_addr_hi = 0;
3551 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3552 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3553 dmae->len = DMAE_LEN32_RD_MAX;
3554 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3555 dmae->comp_addr_hi = 0;
3556 dmae->comp_val = 1;
3557
3558 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3559 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3560 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3561 dmae->src_addr_hi = 0;
7a9b2557
VZ
3562 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3563 DMAE_LEN32_RD_MAX * 4);
3564 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3565 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3566 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3567 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3568 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3569 dmae->comp_val = DMAE_COMP_VAL;
3570
3571 *stats_comp = 0;
3572 bnx2x_hw_stats_post(bp);
3573 bnx2x_stats_comp(bp);
3574}
3575
3576static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3577{
3578 struct dmae_command *dmae;
34f80b04 3579 int port = BP_PORT(bp);
bb2a0f7a 3580 int vn = BP_E1HVN(bp);
a2fbb9ea 3581 u32 opcode;
bb2a0f7a 3582 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3583 u32 mac_addr;
bb2a0f7a
YG
3584 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3585
3586 /* sanity */
3587 if (!bp->link_vars.link_up || !bp->port.pmf) {
3588 BNX2X_ERR("BUG!\n");
3589 return;
3590 }
a2fbb9ea
ET
3591
3592 bp->executer_idx = 0;
bb2a0f7a
YG
3593
3594 /* MCP */
3595 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3596 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3597 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3598#ifdef __BIG_ENDIAN
bb2a0f7a 3599 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3600#else
bb2a0f7a 3601 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3602#endif
bb2a0f7a
YG
3603 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3604 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3605
bb2a0f7a 3606 if (bp->port.port_stx) {
a2fbb9ea
ET
3607
3608 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3609 dmae->opcode = opcode;
bb2a0f7a
YG
3610 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3611 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3612 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3613 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3614 dmae->len = sizeof(struct host_port_stats) >> 2;
3615 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616 dmae->comp_addr_hi = 0;
3617 dmae->comp_val = 1;
a2fbb9ea
ET
3618 }
3619
bb2a0f7a
YG
3620 if (bp->func_stx) {
3621
3622 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3623 dmae->opcode = opcode;
3624 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3625 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3626 dmae->dst_addr_lo = bp->func_stx >> 2;
3627 dmae->dst_addr_hi = 0;
3628 dmae->len = sizeof(struct host_func_stats) >> 2;
3629 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3630 dmae->comp_addr_hi = 0;
3631 dmae->comp_val = 1;
a2fbb9ea
ET
3632 }
3633
bb2a0f7a 3634 /* MAC */
a2fbb9ea
ET
3635 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3636 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3637 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3638#ifdef __BIG_ENDIAN
3639 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3640#else
3641 DMAE_CMD_ENDIANITY_DW_SWAP |
3642#endif
bb2a0f7a
YG
3643 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3644 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3645
c18487ee 3646 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3647
3648 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3649 NIG_REG_INGRESS_BMAC0_MEM);
3650
3651 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3652 BIGMAC_REGISTER_TX_STAT_GTBYT */
3653 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654 dmae->opcode = opcode;
3655 dmae->src_addr_lo = (mac_addr +
3656 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3657 dmae->src_addr_hi = 0;
3658 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3661 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3662 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3663 dmae->comp_addr_hi = 0;
3664 dmae->comp_val = 1;
3665
3666 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3667 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3668 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3669 dmae->opcode = opcode;
3670 dmae->src_addr_lo = (mac_addr +
3671 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672 dmae->src_addr_hi = 0;
3673 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3674 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3675 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3676 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3677 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3678 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3681 dmae->comp_val = 1;
3682
c18487ee 3683 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3684
3685 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3686
3687 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3688 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689 dmae->opcode = opcode;
3690 dmae->src_addr_lo = (mac_addr +
3691 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3692 dmae->src_addr_hi = 0;
3693 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3694 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3695 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3696 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3697 dmae->comp_addr_hi = 0;
3698 dmae->comp_val = 1;
3699
3700 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3701 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3702 dmae->opcode = opcode;
3703 dmae->src_addr_lo = (mac_addr +
3704 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3705 dmae->src_addr_hi = 0;
3706 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3707 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3708 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3709 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3710 dmae->len = 1;
3711 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3712 dmae->comp_addr_hi = 0;
3713 dmae->comp_val = 1;
3714
3715 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3716 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3717 dmae->opcode = opcode;
3718 dmae->src_addr_lo = (mac_addr +
3719 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3720 dmae->src_addr_hi = 0;
3721 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3722 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3723 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3724 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3725 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3726 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3727 dmae->comp_addr_hi = 0;
3728 dmae->comp_val = 1;
3729 }
3730
3731 /* NIG */
bb2a0f7a
YG
3732 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3733 dmae->opcode = opcode;
3734 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3735 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3736 dmae->src_addr_hi = 0;
3737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3738 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3739 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3740 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3741 dmae->comp_addr_hi = 0;
3742 dmae->comp_val = 1;
3743
3744 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3745 dmae->opcode = opcode;
3746 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3747 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3748 dmae->src_addr_hi = 0;
3749 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3751 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3753 dmae->len = (2*sizeof(u32)) >> 2;
3754 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3755 dmae->comp_addr_hi = 0;
3756 dmae->comp_val = 1;
3757
a2fbb9ea
ET
3758 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3759 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3760 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3761 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3762#ifdef __BIG_ENDIAN
3763 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3764#else
3765 DMAE_CMD_ENDIANITY_DW_SWAP |
3766#endif
bb2a0f7a
YG
3767 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3768 (vn << DMAE_CMD_E1HVN_SHIFT));
3769 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3770 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3771 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3772 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3773 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3774 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3775 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3776 dmae->len = (2*sizeof(u32)) >> 2;
3777 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3778 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3779 dmae->comp_val = DMAE_COMP_VAL;
3780
3781 *stats_comp = 0;
a2fbb9ea
ET
3782}
3783
bb2a0f7a 3784static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3785{
bb2a0f7a
YG
3786 struct dmae_command *dmae = &bp->stats_dmae;
3787 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3788
bb2a0f7a
YG
3789 /* sanity */
3790 if (!bp->func_stx) {
3791 BNX2X_ERR("BUG!\n");
3792 return;
3793 }
a2fbb9ea 3794
bb2a0f7a
YG
3795 bp->executer_idx = 0;
3796 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3797
bb2a0f7a
YG
3798 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3799 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3800 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3801#ifdef __BIG_ENDIAN
3802 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3803#else
3804 DMAE_CMD_ENDIANITY_DW_SWAP |
3805#endif
3806 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3807 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3808 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3809 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3810 dmae->dst_addr_lo = bp->func_stx >> 2;
3811 dmae->dst_addr_hi = 0;
3812 dmae->len = sizeof(struct host_func_stats) >> 2;
3813 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3814 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3815 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3816
bb2a0f7a
YG
3817 *stats_comp = 0;
3818}
a2fbb9ea 3819
bb2a0f7a
YG
3820static void bnx2x_stats_start(struct bnx2x *bp)
3821{
3822 if (bp->port.pmf)
3823 bnx2x_port_stats_init(bp);
3824
3825 else if (bp->func_stx)
3826 bnx2x_func_stats_init(bp);
3827
3828 bnx2x_hw_stats_post(bp);
3829 bnx2x_storm_stats_post(bp);
3830}
3831
3832static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3833{
3834 bnx2x_stats_comp(bp);
3835 bnx2x_stats_pmf_update(bp);
3836 bnx2x_stats_start(bp);
3837}
3838
3839static void bnx2x_stats_restart(struct bnx2x *bp)
3840{
3841 bnx2x_stats_comp(bp);
3842 bnx2x_stats_start(bp);
3843}
3844
3845static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3846{
3847 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3848 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3849 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3850 struct {
3851 u32 lo;
3852 u32 hi;
3853 } diff;
bb2a0f7a
YG
3854
3855 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3856 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3857 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3858 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3859 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3860 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3861 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3862 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3863 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3864 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3865 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3866 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3867 UPDATE_STAT64(tx_stat_gt127,
3868 tx_stat_etherstatspkts65octetsto127octets);
3869 UPDATE_STAT64(tx_stat_gt255,
3870 tx_stat_etherstatspkts128octetsto255octets);
3871 UPDATE_STAT64(tx_stat_gt511,
3872 tx_stat_etherstatspkts256octetsto511octets);
3873 UPDATE_STAT64(tx_stat_gt1023,
3874 tx_stat_etherstatspkts512octetsto1023octets);
3875 UPDATE_STAT64(tx_stat_gt1518,
3876 tx_stat_etherstatspkts1024octetsto1522octets);
3877 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3878 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3879 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3880 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3881 UPDATE_STAT64(tx_stat_gterr,
3882 tx_stat_dot3statsinternalmactransmiterrors);
3883 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3884
3885 estats->pause_frames_received_hi =
3886 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3887 estats->pause_frames_received_lo =
3888 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3889
3890 estats->pause_frames_sent_hi =
3891 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3892 estats->pause_frames_sent_lo =
3893 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3894}
3895
3896static void bnx2x_emac_stats_update(struct bnx2x *bp)
3897{
3898 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3899 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3900 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3901
3902 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3903 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3904 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3905 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3906 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3907 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3908 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3909 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3910 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3911 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3912 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3913 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3914 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3915 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3916 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3917 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3918 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3919 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3920 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3921 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3922 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3923 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3924 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3925 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3926 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3927 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3928 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3929 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3930 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3931 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3932 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3933
3934 estats->pause_frames_received_hi =
3935 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3936 estats->pause_frames_received_lo =
3937 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3938 ADD_64(estats->pause_frames_received_hi,
3939 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3940 estats->pause_frames_received_lo,
3941 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3942
3943 estats->pause_frames_sent_hi =
3944 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3945 estats->pause_frames_sent_lo =
3946 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3947 ADD_64(estats->pause_frames_sent_hi,
3948 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3949 estats->pause_frames_sent_lo,
3950 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3951}
3952
3953static int bnx2x_hw_stats_update(struct bnx2x *bp)
3954{
3955 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3956 struct nig_stats *old = &(bp->port.old_nig_stats);
3957 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3958 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3959 struct {
3960 u32 lo;
3961 u32 hi;
3962 } diff;
de832a55 3963 u32 nig_timer_max;
bb2a0f7a
YG
3964
3965 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3966 bnx2x_bmac_stats_update(bp);
3967
3968 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3969 bnx2x_emac_stats_update(bp);
3970
3971 else { /* unreached */
c3eefaf6 3972 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3973 return -1;
3974 }
a2fbb9ea 3975
bb2a0f7a
YG
3976 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3977 new->brb_discard - old->brb_discard);
66e855f3
YG
3978 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3979 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3980
bb2a0f7a
YG
3981 UPDATE_STAT64_NIG(egress_mac_pkt0,
3982 etherstatspkts1024octetsto1522octets);
3983 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3984
bb2a0f7a 3985 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3986
bb2a0f7a
YG
3987 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3988 sizeof(struct mac_stx));
3989 estats->brb_drop_hi = pstats->brb_drop_hi;
3990 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3991
bb2a0f7a 3992 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3993
de832a55
EG
3994 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3995 if (nig_timer_max != estats->nig_timer_max) {
3996 estats->nig_timer_max = nig_timer_max;
3997 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3998 }
3999
bb2a0f7a 4000 return 0;
a2fbb9ea
ET
4001}
4002
bb2a0f7a 4003static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
4004{
4005 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4006 struct tstorm_per_port_stats *tport =
de832a55 4007 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
4008 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4009 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
4010 int i;
4011
6fe49bb9
EG
4012 memcpy(&(fstats->total_bytes_received_hi),
4013 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
4014 sizeof(struct host_func_stats) - 2*sizeof(u32));
4015 estats->error_bytes_received_hi = 0;
4016 estats->error_bytes_received_lo = 0;
4017 estats->etherstatsoverrsizepkts_hi = 0;
4018 estats->etherstatsoverrsizepkts_lo = 0;
4019 estats->no_buff_discard_hi = 0;
4020 estats->no_buff_discard_lo = 0;
a2fbb9ea 4021
ca00392c 4022 for_each_rx_queue(bp, i) {
de832a55
EG
4023 struct bnx2x_fastpath *fp = &bp->fp[i];
4024 int cl_id = fp->cl_id;
4025 struct tstorm_per_client_stats *tclient =
4026 &stats->tstorm_common.client_statistics[cl_id];
4027 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4028 struct ustorm_per_client_stats *uclient =
4029 &stats->ustorm_common.client_statistics[cl_id];
4030 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4031 struct xstorm_per_client_stats *xclient =
4032 &stats->xstorm_common.client_statistics[cl_id];
4033 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4034 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4035 u32 diff;
4036
4037 /* are storm stats valid? */
4038 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4039 bp->stats_counter) {
de832a55
EG
4040 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4041 " xstorm counter (%d) != stats_counter (%d)\n",
4042 i, xclient->stats_counter, bp->stats_counter);
4043 return -1;
4044 }
4045 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4046 bp->stats_counter) {
de832a55
EG
4047 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4048 " tstorm counter (%d) != stats_counter (%d)\n",
4049 i, tclient->stats_counter, bp->stats_counter);
4050 return -2;
4051 }
4052 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4053 bp->stats_counter) {
4054 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4055 " ustorm counter (%d) != stats_counter (%d)\n",
4056 i, uclient->stats_counter, bp->stats_counter);
4057 return -4;
4058 }
a2fbb9ea 4059
de832a55 4060 qstats->total_bytes_received_hi =
ca00392c 4061 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4062 qstats->total_bytes_received_lo =
ca00392c
EG
4063 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4064
4065 ADD_64(qstats->total_bytes_received_hi,
4066 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4067 qstats->total_bytes_received_lo,
4068 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4069
4070 ADD_64(qstats->total_bytes_received_hi,
4071 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4072 qstats->total_bytes_received_lo,
4073 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4074
4075 qstats->valid_bytes_received_hi =
4076 qstats->total_bytes_received_hi;
de832a55 4077 qstats->valid_bytes_received_lo =
ca00392c 4078 qstats->total_bytes_received_lo;
bb2a0f7a 4079
de832a55 4080 qstats->error_bytes_received_hi =
bb2a0f7a 4081 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4082 qstats->error_bytes_received_lo =
bb2a0f7a 4083 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4084
de832a55
EG
4085 ADD_64(qstats->total_bytes_received_hi,
4086 qstats->error_bytes_received_hi,
4087 qstats->total_bytes_received_lo,
4088 qstats->error_bytes_received_lo);
4089
4090 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4091 total_unicast_packets_received);
4092 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4093 total_multicast_packets_received);
4094 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4095 total_broadcast_packets_received);
4096 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4097 etherstatsoverrsizepkts);
4098 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4099
4100 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4101 total_unicast_packets_received);
4102 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4103 total_multicast_packets_received);
4104 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4105 total_broadcast_packets_received);
4106 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4107 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4108 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4109
4110 qstats->total_bytes_transmitted_hi =
ca00392c 4111 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4112 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4113 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4114
4115 ADD_64(qstats->total_bytes_transmitted_hi,
4116 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4117 qstats->total_bytes_transmitted_lo,
4118 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4119
4120 ADD_64(qstats->total_bytes_transmitted_hi,
4121 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4122 qstats->total_bytes_transmitted_lo,
4123 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4124
de832a55
EG
4125 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4126 total_unicast_packets_transmitted);
4127 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4128 total_multicast_packets_transmitted);
4129 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4130 total_broadcast_packets_transmitted);
4131
4132 old_tclient->checksum_discard = tclient->checksum_discard;
4133 old_tclient->ttl0_discard = tclient->ttl0_discard;
4134
4135 ADD_64(fstats->total_bytes_received_hi,
4136 qstats->total_bytes_received_hi,
4137 fstats->total_bytes_received_lo,
4138 qstats->total_bytes_received_lo);
4139 ADD_64(fstats->total_bytes_transmitted_hi,
4140 qstats->total_bytes_transmitted_hi,
4141 fstats->total_bytes_transmitted_lo,
4142 qstats->total_bytes_transmitted_lo);
4143 ADD_64(fstats->total_unicast_packets_received_hi,
4144 qstats->total_unicast_packets_received_hi,
4145 fstats->total_unicast_packets_received_lo,
4146 qstats->total_unicast_packets_received_lo);
4147 ADD_64(fstats->total_multicast_packets_received_hi,
4148 qstats->total_multicast_packets_received_hi,
4149 fstats->total_multicast_packets_received_lo,
4150 qstats->total_multicast_packets_received_lo);
4151 ADD_64(fstats->total_broadcast_packets_received_hi,
4152 qstats->total_broadcast_packets_received_hi,
4153 fstats->total_broadcast_packets_received_lo,
4154 qstats->total_broadcast_packets_received_lo);
4155 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4156 qstats->total_unicast_packets_transmitted_hi,
4157 fstats->total_unicast_packets_transmitted_lo,
4158 qstats->total_unicast_packets_transmitted_lo);
4159 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4160 qstats->total_multicast_packets_transmitted_hi,
4161 fstats->total_multicast_packets_transmitted_lo,
4162 qstats->total_multicast_packets_transmitted_lo);
4163 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4164 qstats->total_broadcast_packets_transmitted_hi,
4165 fstats->total_broadcast_packets_transmitted_lo,
4166 qstats->total_broadcast_packets_transmitted_lo);
4167 ADD_64(fstats->valid_bytes_received_hi,
4168 qstats->valid_bytes_received_hi,
4169 fstats->valid_bytes_received_lo,
4170 qstats->valid_bytes_received_lo);
4171
4172 ADD_64(estats->error_bytes_received_hi,
4173 qstats->error_bytes_received_hi,
4174 estats->error_bytes_received_lo,
4175 qstats->error_bytes_received_lo);
4176 ADD_64(estats->etherstatsoverrsizepkts_hi,
4177 qstats->etherstatsoverrsizepkts_hi,
4178 estats->etherstatsoverrsizepkts_lo,
4179 qstats->etherstatsoverrsizepkts_lo);
4180 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4181 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4182 }
4183
4184 ADD_64(fstats->total_bytes_received_hi,
4185 estats->rx_stat_ifhcinbadoctets_hi,
4186 fstats->total_bytes_received_lo,
4187 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4188
4189 memcpy(estats, &(fstats->total_bytes_received_hi),
4190 sizeof(struct host_func_stats) - 2*sizeof(u32));
4191
de832a55
EG
4192 ADD_64(estats->etherstatsoverrsizepkts_hi,
4193 estats->rx_stat_dot3statsframestoolong_hi,
4194 estats->etherstatsoverrsizepkts_lo,
4195 estats->rx_stat_dot3statsframestoolong_lo);
4196 ADD_64(estats->error_bytes_received_hi,
4197 estats->rx_stat_ifhcinbadoctets_hi,
4198 estats->error_bytes_received_lo,
4199 estats->rx_stat_ifhcinbadoctets_lo);
4200
4201 if (bp->port.pmf) {
4202 estats->mac_filter_discard =
4203 le32_to_cpu(tport->mac_filter_discard);
4204 estats->xxoverflow_discard =
4205 le32_to_cpu(tport->xxoverflow_discard);
4206 estats->brb_truncate_discard =
bb2a0f7a 4207 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4208 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4209 }
bb2a0f7a
YG
4210
4211 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4212
de832a55
EG
4213 bp->stats_pending = 0;
4214
a2fbb9ea
ET
4215 return 0;
4216}
4217
bb2a0f7a 4218static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4219{
bb2a0f7a 4220 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4221 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4222 int i;
a2fbb9ea
ET
4223
4224 nstats->rx_packets =
4225 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4226 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4227 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4228
4229 nstats->tx_packets =
4230 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4231 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4232 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4233
de832a55 4234 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4235
0e39e645 4236 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4237
de832a55 4238 nstats->rx_dropped = estats->mac_discard;
ca00392c 4239 for_each_rx_queue(bp, i)
de832a55
EG
4240 nstats->rx_dropped +=
4241 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4242
a2fbb9ea
ET
4243 nstats->tx_dropped = 0;
4244
4245 nstats->multicast =
de832a55 4246 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4247
bb2a0f7a 4248 nstats->collisions =
de832a55 4249 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4250
4251 nstats->rx_length_errors =
de832a55
EG
4252 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4253 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4254 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4255 bnx2x_hilo(&estats->brb_truncate_hi);
4256 nstats->rx_crc_errors =
4257 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4258 nstats->rx_frame_errors =
4259 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4260 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4261 nstats->rx_missed_errors = estats->xxoverflow_discard;
4262
4263 nstats->rx_errors = nstats->rx_length_errors +
4264 nstats->rx_over_errors +
4265 nstats->rx_crc_errors +
4266 nstats->rx_frame_errors +
0e39e645
ET
4267 nstats->rx_fifo_errors +
4268 nstats->rx_missed_errors;
a2fbb9ea 4269
bb2a0f7a 4270 nstats->tx_aborted_errors =
de832a55
EG
4271 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4272 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4273 nstats->tx_carrier_errors =
4274 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4275 nstats->tx_fifo_errors = 0;
4276 nstats->tx_heartbeat_errors = 0;
4277 nstats->tx_window_errors = 0;
4278
4279 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4280 nstats->tx_carrier_errors +
4281 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4282}
4283
4284static void bnx2x_drv_stats_update(struct bnx2x *bp)
4285{
4286 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4287 int i;
4288
4289 estats->driver_xoff = 0;
4290 estats->rx_err_discard_pkt = 0;
4291 estats->rx_skb_alloc_failed = 0;
4292 estats->hw_csum_err = 0;
ca00392c 4293 for_each_rx_queue(bp, i) {
de832a55
EG
4294 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4295
4296 estats->driver_xoff += qstats->driver_xoff;
4297 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4298 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4299 estats->hw_csum_err += qstats->hw_csum_err;
4300 }
a2fbb9ea
ET
4301}
4302
bb2a0f7a 4303static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4304{
bb2a0f7a 4305 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4306
bb2a0f7a
YG
4307 if (*stats_comp != DMAE_COMP_VAL)
4308 return;
4309
4310 if (bp->port.pmf)
de832a55 4311 bnx2x_hw_stats_update(bp);
a2fbb9ea 4312
de832a55
EG
4313 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4314 BNX2X_ERR("storm stats were not updated for 3 times\n");
4315 bnx2x_panic();
4316 return;
a2fbb9ea
ET
4317 }
4318
de832a55
EG
4319 bnx2x_net_stats_update(bp);
4320 bnx2x_drv_stats_update(bp);
4321
a2fbb9ea 4322 if (bp->msglevel & NETIF_MSG_TIMER) {
ca00392c
EG
4323 struct bnx2x_fastpath *fp0_rx = bp->fp;
4324 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
de832a55
EG
4325 struct tstorm_per_client_stats *old_tclient =
4326 &bp->fp->old_tclient;
4327 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4328 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4329 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4330 int i;
a2fbb9ea
ET
4331
4332 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4333 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4334 " tx pkt (%lx)\n",
ca00392c
EG
4335 bnx2x_tx_avail(fp0_tx),
4336 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4337 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4338 " rx pkt (%lx)\n",
ca00392c
EG
4339 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4340 fp0_rx->rx_comp_cons),
4341 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4342 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4343 "brb truncate %u\n",
4344 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4345 qstats->driver_xoff,
4346 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4347 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4348 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4349 "mac_discard %u mac_filter_discard %u "
4350 "xxovrflow_discard %u brb_truncate_discard %u "
4351 "ttl0_discard %u\n",
4781bfad 4352 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4353 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4354 bnx2x_hilo(&qstats->no_buff_discard_hi),
4355 estats->mac_discard, estats->mac_filter_discard,
4356 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4357 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4358
4359 for_each_queue(bp, i) {
4360 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4361 bnx2x_fp(bp, i, tx_pkt),
4362 bnx2x_fp(bp, i, rx_pkt),
4363 bnx2x_fp(bp, i, rx_calls));
4364 }
4365 }
4366
bb2a0f7a
YG
4367 bnx2x_hw_stats_post(bp);
4368 bnx2x_storm_stats_post(bp);
4369}
a2fbb9ea 4370
bb2a0f7a
YG
4371static void bnx2x_port_stats_stop(struct bnx2x *bp)
4372{
4373 struct dmae_command *dmae;
4374 u32 opcode;
4375 int loader_idx = PMF_DMAE_C(bp);
4376 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4377
bb2a0f7a 4378 bp->executer_idx = 0;
a2fbb9ea 4379
bb2a0f7a
YG
4380 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4381 DMAE_CMD_C_ENABLE |
4382 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4383#ifdef __BIG_ENDIAN
bb2a0f7a 4384 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4385#else
bb2a0f7a 4386 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4387#endif
bb2a0f7a
YG
4388 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4389 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4390
4391 if (bp->port.port_stx) {
4392
4393 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4394 if (bp->func_stx)
4395 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4396 else
4397 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4398 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4399 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4400 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4401 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4402 dmae->len = sizeof(struct host_port_stats) >> 2;
4403 if (bp->func_stx) {
4404 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4405 dmae->comp_addr_hi = 0;
4406 dmae->comp_val = 1;
4407 } else {
4408 dmae->comp_addr_lo =
4409 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4410 dmae->comp_addr_hi =
4411 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4412 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4413
bb2a0f7a
YG
4414 *stats_comp = 0;
4415 }
a2fbb9ea
ET
4416 }
4417
bb2a0f7a
YG
4418 if (bp->func_stx) {
4419
4420 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4421 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4422 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4423 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4424 dmae->dst_addr_lo = bp->func_stx >> 2;
4425 dmae->dst_addr_hi = 0;
4426 dmae->len = sizeof(struct host_func_stats) >> 2;
4427 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4428 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4429 dmae->comp_val = DMAE_COMP_VAL;
4430
4431 *stats_comp = 0;
a2fbb9ea 4432 }
bb2a0f7a
YG
4433}
4434
4435static void bnx2x_stats_stop(struct bnx2x *bp)
4436{
4437 int update = 0;
4438
4439 bnx2x_stats_comp(bp);
4440
4441 if (bp->port.pmf)
4442 update = (bnx2x_hw_stats_update(bp) == 0);
4443
4444 update |= (bnx2x_storm_stats_update(bp) == 0);
4445
4446 if (update) {
4447 bnx2x_net_stats_update(bp);
a2fbb9ea 4448
bb2a0f7a
YG
4449 if (bp->port.pmf)
4450 bnx2x_port_stats_stop(bp);
4451
4452 bnx2x_hw_stats_post(bp);
4453 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4454 }
4455}
4456
bb2a0f7a
YG
4457static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4458{
4459}
4460
4461static const struct {
4462 void (*action)(struct bnx2x *bp);
4463 enum bnx2x_stats_state next_state;
4464} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4465/* state event */
4466{
4467/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4468/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4469/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4470/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4471},
4472{
4473/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4474/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4475/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4476/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4477}
4478};
4479
4480static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4481{
4482 enum bnx2x_stats_state state = bp->stats_state;
4483
4484 bnx2x_stats_stm[state][event].action(bp);
4485 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4486
8924665a
EG
4487 /* Make sure the state has been "changed" */
4488 smp_wmb();
4489
bb2a0f7a
YG
4490 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4491 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4492 state, event, bp->stats_state);
4493}
4494
6fe49bb9
EG
4495static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4496{
4497 struct dmae_command *dmae;
4498 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4499
4500 /* sanity */
4501 if (!bp->port.pmf || !bp->port.port_stx) {
4502 BNX2X_ERR("BUG!\n");
4503 return;
4504 }
4505
4506 bp->executer_idx = 0;
4507
4508 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4509 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4510 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4511 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4512#ifdef __BIG_ENDIAN
4513 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4514#else
4515 DMAE_CMD_ENDIANITY_DW_SWAP |
4516#endif
4517 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4518 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4519 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4520 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4521 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4522 dmae->dst_addr_hi = 0;
4523 dmae->len = sizeof(struct host_port_stats) >> 2;
4524 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4525 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4526 dmae->comp_val = DMAE_COMP_VAL;
4527
4528 *stats_comp = 0;
4529 bnx2x_hw_stats_post(bp);
4530 bnx2x_stats_comp(bp);
4531}
4532
4533static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4534{
4535 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4536 int port = BP_PORT(bp);
4537 int func;
4538 u32 func_stx;
4539
4540 /* sanity */
4541 if (!bp->port.pmf || !bp->func_stx) {
4542 BNX2X_ERR("BUG!\n");
4543 return;
4544 }
4545
4546 /* save our func_stx */
4547 func_stx = bp->func_stx;
4548
4549 for (vn = VN_0; vn < vn_max; vn++) {
4550 func = 2*vn + port;
4551
4552 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4553 bnx2x_func_stats_init(bp);
4554 bnx2x_hw_stats_post(bp);
4555 bnx2x_stats_comp(bp);
4556 }
4557
4558 /* restore our func_stx */
4559 bp->func_stx = func_stx;
4560}
4561
4562static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4563{
4564 struct dmae_command *dmae = &bp->stats_dmae;
4565 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4566
4567 /* sanity */
4568 if (!bp->func_stx) {
4569 BNX2X_ERR("BUG!\n");
4570 return;
4571 }
4572
4573 bp->executer_idx = 0;
4574 memset(dmae, 0, sizeof(struct dmae_command));
4575
4576 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4577 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4578 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4579#ifdef __BIG_ENDIAN
4580 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4581#else
4582 DMAE_CMD_ENDIANITY_DW_SWAP |
4583#endif
4584 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4585 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4586 dmae->src_addr_lo = bp->func_stx >> 2;
4587 dmae->src_addr_hi = 0;
4588 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4589 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4590 dmae->len = sizeof(struct host_func_stats) >> 2;
4591 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4592 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4593 dmae->comp_val = DMAE_COMP_VAL;
4594
4595 *stats_comp = 0;
4596 bnx2x_hw_stats_post(bp);
4597 bnx2x_stats_comp(bp);
4598}
4599
4600static void bnx2x_stats_init(struct bnx2x *bp)
4601{
4602 int port = BP_PORT(bp);
4603 int func = BP_FUNC(bp);
4604 int i;
4605
4606 bp->stats_pending = 0;
4607 bp->executer_idx = 0;
4608 bp->stats_counter = 0;
4609
4610 /* port and func stats for management */
4611 if (!BP_NOMCP(bp)) {
4612 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4613 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4614
4615 } else {
4616 bp->port.port_stx = 0;
4617 bp->func_stx = 0;
4618 }
4619 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4620 bp->port.port_stx, bp->func_stx);
4621
4622 /* port stats */
4623 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4624 bp->port.old_nig_stats.brb_discard =
4625 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4626 bp->port.old_nig_stats.brb_truncate =
4627 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4628 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4629 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4630 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4631 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4632
4633 /* function stats */
4634 for_each_queue(bp, i) {
4635 struct bnx2x_fastpath *fp = &bp->fp[i];
4636
4637 memset(&fp->old_tclient, 0,
4638 sizeof(struct tstorm_per_client_stats));
4639 memset(&fp->old_uclient, 0,
4640 sizeof(struct ustorm_per_client_stats));
4641 memset(&fp->old_xclient, 0,
4642 sizeof(struct xstorm_per_client_stats));
4643 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4644 }
4645
4646 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4647 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4648
4649 bp->stats_state = STATS_STATE_DISABLED;
4650
4651 if (bp->port.pmf) {
4652 if (bp->port.port_stx)
4653 bnx2x_port_stats_base_init(bp);
4654
4655 if (bp->func_stx)
4656 bnx2x_func_stats_base_init(bp);
4657
4658 } else if (bp->func_stx)
4659 bnx2x_func_stats_base_update(bp);
4660}
4661
a2fbb9ea
ET
4662static void bnx2x_timer(unsigned long data)
4663{
4664 struct bnx2x *bp = (struct bnx2x *) data;
4665
4666 if (!netif_running(bp->dev))
4667 return;
4668
4669 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4670 goto timer_restart;
a2fbb9ea
ET
4671
4672 if (poll) {
4673 struct bnx2x_fastpath *fp = &bp->fp[0];
4674 int rc;
4675
7961f791 4676 bnx2x_tx_int(fp);
a2fbb9ea
ET
4677 rc = bnx2x_rx_int(fp, 1000);
4678 }
4679
34f80b04
EG
4680 if (!BP_NOMCP(bp)) {
4681 int func = BP_FUNC(bp);
a2fbb9ea
ET
4682 u32 drv_pulse;
4683 u32 mcp_pulse;
4684
4685 ++bp->fw_drv_pulse_wr_seq;
4686 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4687 /* TBD - add SYSTEM_TIME */
4688 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4689 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4690
34f80b04 4691 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4692 MCP_PULSE_SEQ_MASK);
4693 /* The delta between driver pulse and mcp response
4694 * should be 1 (before mcp response) or 0 (after mcp response)
4695 */
4696 if ((drv_pulse != mcp_pulse) &&
4697 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4698 /* someone lost a heartbeat... */
4699 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4700 drv_pulse, mcp_pulse);
4701 }
4702 }
4703
f34d28ea 4704 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 4705 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4706
f1410647 4707timer_restart:
a2fbb9ea
ET
4708 mod_timer(&bp->timer, jiffies + bp->current_interval);
4709}
4710
4711/* end of Statistics */
4712
4713/* nic init */
4714
4715/*
4716 * nic init service functions
4717 */
4718
34f80b04 4719static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4720{
34f80b04
EG
4721 int port = BP_PORT(bp);
4722
ca00392c
EG
4723 /* "CSTORM" */
4724 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4725 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4726 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4727 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4728 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4729 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4730}
4731
5c862848
EG
4732static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4733 dma_addr_t mapping, int sb_id)
34f80b04
EG
4734{
4735 int port = BP_PORT(bp);
bb2a0f7a 4736 int func = BP_FUNC(bp);
a2fbb9ea 4737 int index;
34f80b04 4738 u64 section;
a2fbb9ea
ET
4739
4740 /* USTORM */
4741 section = ((u64)mapping) + offsetof(struct host_status_block,
4742 u_status_block);
34f80b04 4743 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4744
ca00392c
EG
4745 REG_WR(bp, BAR_CSTRORM_INTMEM +
4746 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4747 REG_WR(bp, BAR_CSTRORM_INTMEM +
4748 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4749 U64_HI(section));
ca00392c
EG
4750 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4751 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4752
4753 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4754 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4755 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4756
4757 /* CSTORM */
4758 section = ((u64)mapping) + offsetof(struct host_status_block,
4759 c_status_block);
34f80b04 4760 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4761
4762 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4763 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4764 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4765 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4766 U64_HI(section));
7a9b2557 4767 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4768 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4769
4770 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4771 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4772 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4773
4774 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4775}
4776
4777static void bnx2x_zero_def_sb(struct bnx2x *bp)
4778{
4779 int func = BP_FUNC(bp);
a2fbb9ea 4780
ca00392c 4781 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4782 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4783 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4784 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4785 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4786 sizeof(struct cstorm_def_status_block_u)/4);
4787 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4788 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4789 sizeof(struct cstorm_def_status_block_c)/4);
4790 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4791 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4792 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4793}
4794
4795static void bnx2x_init_def_sb(struct bnx2x *bp,
4796 struct host_def_status_block *def_sb,
34f80b04 4797 dma_addr_t mapping, int sb_id)
a2fbb9ea 4798{
34f80b04
EG
4799 int port = BP_PORT(bp);
4800 int func = BP_FUNC(bp);
a2fbb9ea
ET
4801 int index, val, reg_offset;
4802 u64 section;
4803
4804 /* ATTN */
4805 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4806 atten_status_block);
34f80b04 4807 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4808
49d66772
ET
4809 bp->attn_state = 0;
4810
a2fbb9ea
ET
4811 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4812 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4813
34f80b04 4814 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4815 bp->attn_group[index].sig[0] = REG_RD(bp,
4816 reg_offset + 0x10*index);
4817 bp->attn_group[index].sig[1] = REG_RD(bp,
4818 reg_offset + 0x4 + 0x10*index);
4819 bp->attn_group[index].sig[2] = REG_RD(bp,
4820 reg_offset + 0x8 + 0x10*index);
4821 bp->attn_group[index].sig[3] = REG_RD(bp,
4822 reg_offset + 0xc + 0x10*index);
4823 }
4824
a2fbb9ea
ET
4825 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4826 HC_REG_ATTN_MSG0_ADDR_L);
4827
4828 REG_WR(bp, reg_offset, U64_LO(section));
4829 REG_WR(bp, reg_offset + 4, U64_HI(section));
4830
4831 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4832
4833 val = REG_RD(bp, reg_offset);
34f80b04 4834 val |= sb_id;
a2fbb9ea
ET
4835 REG_WR(bp, reg_offset, val);
4836
4837 /* USTORM */
4838 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4839 u_def_status_block);
34f80b04 4840 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4841
ca00392c
EG
4842 REG_WR(bp, BAR_CSTRORM_INTMEM +
4843 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4844 REG_WR(bp, BAR_CSTRORM_INTMEM +
4845 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4846 U64_HI(section));
ca00392c
EG
4847 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4848 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4849
4850 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4851 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4852 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4853
4854 /* CSTORM */
4855 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4856 c_def_status_block);
34f80b04 4857 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4858
4859 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4860 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4861 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4862 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4863 U64_HI(section));
5c862848 4864 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4865 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4866
4867 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4868 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4869 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4870
4871 /* TSTORM */
4872 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4873 t_def_status_block);
34f80b04 4874 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4875
4876 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4877 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4878 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4879 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4880 U64_HI(section));
5c862848 4881 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4882 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4883
4884 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4885 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4886 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4887
4888 /* XSTORM */
4889 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4890 x_def_status_block);
34f80b04 4891 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4892
4893 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4894 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4895 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4896 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4897 U64_HI(section));
5c862848 4898 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4899 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4900
4901 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4902 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4903 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4904
bb2a0f7a 4905 bp->stats_pending = 0;
66e855f3 4906 bp->set_mac_pending = 0;
bb2a0f7a 4907
34f80b04 4908 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4909}
4910
4911static void bnx2x_update_coalesce(struct bnx2x *bp)
4912{
34f80b04 4913 int port = BP_PORT(bp);
a2fbb9ea
ET
4914 int i;
4915
4916 for_each_queue(bp, i) {
34f80b04 4917 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4918
4919 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4920 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4921 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4922 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4923 bp->rx_ticks/12);
ca00392c
EG
4924 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4925 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4926 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4927 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4928
4929 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4930 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4931 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4932 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4933 bp->tx_ticks/12);
a2fbb9ea 4934 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4935 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4936 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4937 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4938 }
4939}
4940
7a9b2557
VZ
4941static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4942 struct bnx2x_fastpath *fp, int last)
4943{
4944 int i;
4945
4946 for (i = 0; i < last; i++) {
4947 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4948 struct sk_buff *skb = rx_buf->skb;
4949
4950 if (skb == NULL) {
4951 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4952 continue;
4953 }
4954
4955 if (fp->tpa_state[i] == BNX2X_TPA_START)
4956 pci_unmap_single(bp->pdev,
4957 pci_unmap_addr(rx_buf, mapping),
356e2385 4958 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4959
4960 dev_kfree_skb(skb);
4961 rx_buf->skb = NULL;
4962 }
4963}
4964
a2fbb9ea
ET
4965static void bnx2x_init_rx_rings(struct bnx2x *bp)
4966{
7a9b2557 4967 int func = BP_FUNC(bp);
32626230
EG
4968 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4969 ETH_MAX_AGGREGATION_QUEUES_E1H;
4970 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4971 int i, j;
a2fbb9ea 4972
87942b46 4973 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4974 DP(NETIF_MSG_IFUP,
4975 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4976
7a9b2557 4977 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4978
555f6c78 4979 for_each_rx_queue(bp, j) {
32626230 4980 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4981
32626230 4982 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4983 fp->tpa_pool[i].skb =
4984 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4985 if (!fp->tpa_pool[i].skb) {
4986 BNX2X_ERR("Failed to allocate TPA "
4987 "skb pool for queue[%d] - "
4988 "disabling TPA on this "
4989 "queue!\n", j);
4990 bnx2x_free_tpa_pool(bp, fp, i);
4991 fp->disable_tpa = 1;
4992 break;
4993 }
4994 pci_unmap_addr_set((struct sw_rx_bd *)
4995 &bp->fp->tpa_pool[i],
4996 mapping, 0);
4997 fp->tpa_state[i] = BNX2X_TPA_STOP;
4998 }
4999 }
5000 }
5001
555f6c78 5002 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
5003 struct bnx2x_fastpath *fp = &bp->fp[j];
5004
5005 fp->rx_bd_cons = 0;
5006 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
5007 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5008
ca00392c
EG
5009 /* Mark queue as Rx */
5010 fp->is_rx_queue = 1;
5011
7a9b2557
VZ
5012 /* "next page" elements initialization */
5013 /* SGE ring */
5014 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5015 struct eth_rx_sge *sge;
5016
5017 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5018 sge->addr_hi =
5019 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5020 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5021 sge->addr_lo =
5022 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5023 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5024 }
5025
5026 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5027
7a9b2557 5028 /* RX BD ring */
a2fbb9ea
ET
5029 for (i = 1; i <= NUM_RX_RINGS; i++) {
5030 struct eth_rx_bd *rx_bd;
5031
5032 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5033 rx_bd->addr_hi =
5034 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5035 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5036 rx_bd->addr_lo =
5037 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5038 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5039 }
5040
34f80b04 5041 /* CQ ring */
a2fbb9ea
ET
5042 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5043 struct eth_rx_cqe_next_page *nextpg;
5044
5045 nextpg = (struct eth_rx_cqe_next_page *)
5046 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5047 nextpg->addr_hi =
5048 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5049 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5050 nextpg->addr_lo =
5051 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5052 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5053 }
5054
7a9b2557
VZ
5055 /* Allocate SGEs and initialize the ring elements */
5056 for (i = 0, ring_prod = 0;
5057 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5058
7a9b2557
VZ
5059 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5060 BNX2X_ERR("was only able to allocate "
5061 "%d rx sges\n", i);
5062 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5063 /* Cleanup already allocated elements */
5064 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5065 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5066 fp->disable_tpa = 1;
5067 ring_prod = 0;
5068 break;
5069 }
5070 ring_prod = NEXT_SGE_IDX(ring_prod);
5071 }
5072 fp->rx_sge_prod = ring_prod;
5073
5074 /* Allocate BDs and initialize BD ring */
66e855f3 5075 fp->rx_comp_cons = 0;
7a9b2557 5076 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5077 for (i = 0; i < bp->rx_ring_size; i++) {
5078 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5079 BNX2X_ERR("was only able to allocate "
de832a55
EG
5080 "%d rx skbs on queue[%d]\n", i, j);
5081 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5082 break;
5083 }
5084 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5085 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5086 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5087 }
5088
7a9b2557
VZ
5089 fp->rx_bd_prod = ring_prod;
5090 /* must not have more available CQEs than BDs */
5091 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5092 cqe_ring_prod);
a2fbb9ea
ET
5093 fp->rx_pkt = fp->rx_calls = 0;
5094
7a9b2557
VZ
5095 /* Warning!
5096 * this will generate an interrupt (to the TSTORM)
5097 * must only be done after chip is initialized
5098 */
5099 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5100 fp->rx_sge_prod);
a2fbb9ea
ET
5101 if (j != 0)
5102 continue;
5103
5104 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5105 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5106 U64_LO(fp->rx_comp_mapping));
5107 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5108 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5109 U64_HI(fp->rx_comp_mapping));
5110 }
5111}
5112
5113static void bnx2x_init_tx_ring(struct bnx2x *bp)
5114{
5115 int i, j;
5116
555f6c78 5117 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5118 struct bnx2x_fastpath *fp = &bp->fp[j];
5119
5120 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5121 struct eth_tx_next_bd *tx_next_bd =
5122 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5123
ca00392c 5124 tx_next_bd->addr_hi =
a2fbb9ea 5125 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5126 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5127 tx_next_bd->addr_lo =
a2fbb9ea 5128 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5129 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5130 }
5131
ca00392c
EG
5132 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5133 fp->tx_db.data.zero_fill1 = 0;
5134 fp->tx_db.data.prod = 0;
5135
a2fbb9ea
ET
5136 fp->tx_pkt_prod = 0;
5137 fp->tx_pkt_cons = 0;
5138 fp->tx_bd_prod = 0;
5139 fp->tx_bd_cons = 0;
5140 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5141 fp->tx_pkt = 0;
5142 }
6fe49bb9
EG
5143
5144 /* clean tx statistics */
5145 for_each_rx_queue(bp, i)
5146 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5147}
5148
5149static void bnx2x_init_sp_ring(struct bnx2x *bp)
5150{
34f80b04 5151 int func = BP_FUNC(bp);
a2fbb9ea
ET
5152
5153 spin_lock_init(&bp->spq_lock);
5154
5155 bp->spq_left = MAX_SPQ_PENDING;
5156 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5157 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5158 bp->spq_prod_bd = bp->spq;
5159 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5160
34f80b04 5161 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5162 U64_LO(bp->spq_mapping));
34f80b04
EG
5163 REG_WR(bp,
5164 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5165 U64_HI(bp->spq_mapping));
5166
34f80b04 5167 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5168 bp->spq_prod_idx);
5169}
5170
5171static void bnx2x_init_context(struct bnx2x *bp)
5172{
5173 int i;
5174
ca00392c 5175 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5176 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5177 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5178 u8 cl_id = fp->cl_id;
a2fbb9ea 5179
34f80b04
EG
5180 context->ustorm_st_context.common.sb_index_numbers =
5181 BNX2X_RX_SB_INDEX_NUM;
0626b899 5182 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5183 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5184 context->ustorm_st_context.common.flags =
de832a55
EG
5185 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5186 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5187 context->ustorm_st_context.common.statistics_counter_id =
5188 cl_id;
8d9c5f34 5189 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5190 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5191 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5192 bp->rx_buf_size;
34f80b04 5193 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5194 U64_HI(fp->rx_desc_mapping);
34f80b04 5195 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5196 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5197 if (!fp->disable_tpa) {
5198 context->ustorm_st_context.common.flags |=
ca00392c 5199 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5200 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5201 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5202 (u32)0xffff);
7a9b2557
VZ
5203 context->ustorm_st_context.common.sge_page_base_hi =
5204 U64_HI(fp->rx_sge_mapping);
5205 context->ustorm_st_context.common.sge_page_base_lo =
5206 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5207
5208 context->ustorm_st_context.common.max_sges_for_packet =
5209 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5210 context->ustorm_st_context.common.max_sges_for_packet =
5211 ((context->ustorm_st_context.common.
5212 max_sges_for_packet + PAGES_PER_SGE - 1) &
5213 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5214 }
5215
8d9c5f34
EG
5216 context->ustorm_ag_context.cdu_usage =
5217 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218 CDU_REGION_NUMBER_UCM_AG,
5219 ETH_CONNECTION_TYPE);
5220
ca00392c
EG
5221 context->xstorm_ag_context.cdu_reserved =
5222 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5223 CDU_REGION_NUMBER_XCM_AG,
5224 ETH_CONNECTION_TYPE);
5225 }
5226
5227 for_each_tx_queue(bp, i) {
5228 struct bnx2x_fastpath *fp = &bp->fp[i];
5229 struct eth_context *context =
5230 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5231
5232 context->cstorm_st_context.sb_index_number =
5233 C_SB_ETH_TX_CQ_INDEX;
5234 context->cstorm_st_context.status_block_id = fp->sb_id;
5235
8d9c5f34
EG
5236 context->xstorm_st_context.tx_bd_page_base_hi =
5237 U64_HI(fp->tx_desc_mapping);
5238 context->xstorm_st_context.tx_bd_page_base_lo =
5239 U64_LO(fp->tx_desc_mapping);
ca00392c 5240 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5241 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5242 }
5243}
5244
5245static void bnx2x_init_ind_table(struct bnx2x *bp)
5246{
26c8fa4d 5247 int func = BP_FUNC(bp);
a2fbb9ea
ET
5248 int i;
5249
555f6c78 5250 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5251 return;
5252
555f6c78
EG
5253 DP(NETIF_MSG_IFUP,
5254 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5255 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5256 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5257 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5258 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
5259}
5260
49d66772
ET
5261static void bnx2x_set_client_config(struct bnx2x *bp)
5262{
49d66772 5263 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5264 int port = BP_PORT(bp);
5265 int i;
49d66772 5266
e7799c5f 5267 tstorm_client.mtu = bp->dev->mtu;
49d66772 5268 tstorm_client.config_flags =
de832a55
EG
5269 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5270 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5271#ifdef BCM_VLAN
0c6671b0 5272 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5273 tstorm_client.config_flags |=
8d9c5f34 5274 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5275 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5276 }
5277#endif
49d66772
ET
5278
5279 for_each_queue(bp, i) {
de832a55
EG
5280 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5281
49d66772 5282 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5283 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5284 ((u32 *)&tstorm_client)[0]);
5285 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5286 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5287 ((u32 *)&tstorm_client)[1]);
5288 }
5289
34f80b04
EG
5290 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5291 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5292}
5293
a2fbb9ea
ET
5294static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5295{
a2fbb9ea 5296 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5297 int mode = bp->rx_mode;
37b091ba 5298 int mask = bp->rx_mode_cl_mask;
34f80b04 5299 int func = BP_FUNC(bp);
581ce43d 5300 int port = BP_PORT(bp);
a2fbb9ea 5301 int i;
581ce43d
EG
5302 /* All but management unicast packets should pass to the host as well */
5303 u32 llh_mask =
5304 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5305 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5306 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5307 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5308
3196a88a 5309 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5310
5311 switch (mode) {
5312 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5313 tstorm_mac_filter.ucast_drop_all = mask;
5314 tstorm_mac_filter.mcast_drop_all = mask;
5315 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5316 break;
356e2385 5317
a2fbb9ea 5318 case BNX2X_RX_MODE_NORMAL:
34f80b04 5319 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5320 break;
356e2385 5321
a2fbb9ea 5322 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5323 tstorm_mac_filter.mcast_accept_all = mask;
5324 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5325 break;
356e2385 5326
a2fbb9ea 5327 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5328 tstorm_mac_filter.ucast_accept_all = mask;
5329 tstorm_mac_filter.mcast_accept_all = mask;
5330 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5331 /* pass management unicast packets as well */
5332 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5333 break;
356e2385 5334
a2fbb9ea 5335 default:
34f80b04
EG
5336 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5337 break;
a2fbb9ea
ET
5338 }
5339
581ce43d
EG
5340 REG_WR(bp,
5341 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5342 llh_mask);
5343
a2fbb9ea
ET
5344 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5345 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5346 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5347 ((u32 *)&tstorm_mac_filter)[i]);
5348
34f80b04 5349/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5350 ((u32 *)&tstorm_mac_filter)[i]); */
5351 }
a2fbb9ea 5352
49d66772
ET
5353 if (mode != BNX2X_RX_MODE_NONE)
5354 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5355}
5356
471de716
EG
5357static void bnx2x_init_internal_common(struct bnx2x *bp)
5358{
5359 int i;
5360
5361 /* Zero this manually as its initialization is
5362 currently missing in the initTool */
5363 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5364 REG_WR(bp, BAR_USTRORM_INTMEM +
5365 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5366}
5367
5368static void bnx2x_init_internal_port(struct bnx2x *bp)
5369{
5370 int port = BP_PORT(bp);
5371
ca00392c
EG
5372 REG_WR(bp,
5373 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5374 REG_WR(bp,
5375 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5376 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5377 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5378}
5379
5380static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5381{
a2fbb9ea
ET
5382 struct tstorm_eth_function_common_config tstorm_config = {0};
5383 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5384 int port = BP_PORT(bp);
5385 int func = BP_FUNC(bp);
de832a55
EG
5386 int i, j;
5387 u32 offset;
471de716 5388 u16 max_agg_size;
a2fbb9ea
ET
5389
5390 if (is_multi(bp)) {
555f6c78 5391 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5392 tstorm_config.rss_result_mask = MULTI_MASK;
5393 }
ca00392c
EG
5394
5395 /* Enable TPA if needed */
5396 if (bp->flags & TPA_ENABLE_FLAG)
5397 tstorm_config.config_flags |=
5398 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5399
8d9c5f34
EG
5400 if (IS_E1HMF(bp))
5401 tstorm_config.config_flags |=
5402 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5403
34f80b04
EG
5404 tstorm_config.leading_client_id = BP_L_ID(bp);
5405
a2fbb9ea 5406 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5407 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5408 (*(u32 *)&tstorm_config));
5409
c14423fe 5410 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5411 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
5412 bnx2x_set_storm_rx_mode(bp);
5413
de832a55
EG
5414 for_each_queue(bp, i) {
5415 u8 cl_id = bp->fp[i].cl_id;
5416
5417 /* reset xstorm per client statistics */
5418 offset = BAR_XSTRORM_INTMEM +
5419 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5420 for (j = 0;
5421 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5422 REG_WR(bp, offset + j*4, 0);
5423
5424 /* reset tstorm per client statistics */
5425 offset = BAR_TSTRORM_INTMEM +
5426 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5427 for (j = 0;
5428 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5429 REG_WR(bp, offset + j*4, 0);
5430
5431 /* reset ustorm per client statistics */
5432 offset = BAR_USTRORM_INTMEM +
5433 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5434 for (j = 0;
5435 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5436 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5437 }
5438
5439 /* Init statistics related context */
34f80b04 5440 stats_flags.collect_eth = 1;
a2fbb9ea 5441
66e855f3 5442 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5443 ((u32 *)&stats_flags)[0]);
66e855f3 5444 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5445 ((u32 *)&stats_flags)[1]);
5446
66e855f3 5447 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5448 ((u32 *)&stats_flags)[0]);
66e855f3 5449 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5450 ((u32 *)&stats_flags)[1]);
5451
de832a55
EG
5452 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5453 ((u32 *)&stats_flags)[0]);
5454 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5455 ((u32 *)&stats_flags)[1]);
5456
66e855f3 5457 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5458 ((u32 *)&stats_flags)[0]);
66e855f3 5459 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5460 ((u32 *)&stats_flags)[1]);
5461
66e855f3
YG
5462 REG_WR(bp, BAR_XSTRORM_INTMEM +
5463 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5464 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5465 REG_WR(bp, BAR_XSTRORM_INTMEM +
5466 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5467 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5468
5469 REG_WR(bp, BAR_TSTRORM_INTMEM +
5470 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5471 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5472 REG_WR(bp, BAR_TSTRORM_INTMEM +
5473 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5474 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5475
de832a55
EG
5476 REG_WR(bp, BAR_USTRORM_INTMEM +
5477 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5478 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5479 REG_WR(bp, BAR_USTRORM_INTMEM +
5480 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5481 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5482
34f80b04
EG
5483 if (CHIP_IS_E1H(bp)) {
5484 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5485 IS_E1HMF(bp));
5486 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5487 IS_E1HMF(bp));
5488 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5489 IS_E1HMF(bp));
5490 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5491 IS_E1HMF(bp));
5492
7a9b2557
VZ
5493 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5494 bp->e1hov);
34f80b04
EG
5495 }
5496
4f40f2cb
EG
5497 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5498 max_agg_size =
5499 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5500 SGE_PAGE_SIZE * PAGES_PER_SGE),
5501 (u32)0xffff);
555f6c78 5502 for_each_rx_queue(bp, i) {
7a9b2557 5503 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5504
5505 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5506 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5507 U64_LO(fp->rx_comp_mapping));
5508 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5509 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5510 U64_HI(fp->rx_comp_mapping));
5511
ca00392c
EG
5512 /* Next page */
5513 REG_WR(bp, BAR_USTRORM_INTMEM +
5514 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5515 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5516 REG_WR(bp, BAR_USTRORM_INTMEM +
5517 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5518 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5519
7a9b2557 5520 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5521 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5522 max_agg_size);
5523 }
8a1c38d1 5524
1c06328c
EG
5525 /* dropless flow control */
5526 if (CHIP_IS_E1H(bp)) {
5527 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5528
5529 rx_pause.bd_thr_low = 250;
5530 rx_pause.cqe_thr_low = 250;
5531 rx_pause.cos = 1;
5532 rx_pause.sge_thr_low = 0;
5533 rx_pause.bd_thr_high = 350;
5534 rx_pause.cqe_thr_high = 350;
5535 rx_pause.sge_thr_high = 0;
5536
5537 for_each_rx_queue(bp, i) {
5538 struct bnx2x_fastpath *fp = &bp->fp[i];
5539
5540 if (!fp->disable_tpa) {
5541 rx_pause.sge_thr_low = 150;
5542 rx_pause.sge_thr_high = 250;
5543 }
5544
5545
5546 offset = BAR_USTRORM_INTMEM +
5547 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5548 fp->cl_id);
5549 for (j = 0;
5550 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5551 j++)
5552 REG_WR(bp, offset + j*4,
5553 ((u32 *)&rx_pause)[j]);
5554 }
5555 }
5556
8a1c38d1
EG
5557 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5558
5559 /* Init rate shaping and fairness contexts */
5560 if (IS_E1HMF(bp)) {
5561 int vn;
5562
5563 /* During init there is no active link
5564 Until link is up, set link rate to 10Gbps */
5565 bp->link_vars.line_speed = SPEED_10000;
5566 bnx2x_init_port_minmax(bp);
5567
b015e3d1
EG
5568 if (!BP_NOMCP(bp))
5569 bp->mf_config =
5570 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
5571 bnx2x_calc_vn_weight_sum(bp);
5572
5573 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5574 bnx2x_init_vn_minmax(bp, 2*vn + port);
5575
5576 /* Enable rate shaping and fairness */
b015e3d1 5577 bp->cmng.flags.cmng_enables |=
8a1c38d1 5578 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5579
8a1c38d1
EG
5580 } else {
5581 /* rate shaping and fairness are disabled */
5582 DP(NETIF_MSG_IFUP,
5583 "single function mode minmax will be disabled\n");
5584 }
5585
5586
5587 /* Store it to internal memory */
5588 if (bp->port.pmf)
5589 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5590 REG_WR(bp, BAR_XSTRORM_INTMEM +
5591 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5592 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5593}
5594
471de716
EG
5595static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5596{
5597 switch (load_code) {
5598 case FW_MSG_CODE_DRV_LOAD_COMMON:
5599 bnx2x_init_internal_common(bp);
5600 /* no break */
5601
5602 case FW_MSG_CODE_DRV_LOAD_PORT:
5603 bnx2x_init_internal_port(bp);
5604 /* no break */
5605
5606 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5607 bnx2x_init_internal_func(bp);
5608 break;
5609
5610 default:
5611 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5612 break;
5613 }
5614}
5615
5616static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5617{
5618 int i;
5619
5620 for_each_queue(bp, i) {
5621 struct bnx2x_fastpath *fp = &bp->fp[i];
5622
34f80b04 5623 fp->bp = bp;
a2fbb9ea 5624 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5625 fp->index = i;
34f80b04 5626 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
5627#ifdef BCM_CNIC
5628 fp->sb_id = fp->cl_id + 1;
5629#else
34f80b04 5630 fp->sb_id = fp->cl_id;
37b091ba 5631#endif
ca00392c
EG
5632 /* Suitable Rx and Tx SBs are served by the same client */
5633 if (i >= bp->num_rx_queues)
5634 fp->cl_id -= bp->num_rx_queues;
34f80b04 5635 DP(NETIF_MSG_IFUP,
f5372251
EG
5636 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5637 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5638 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5639 fp->sb_id);
5c862848 5640 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5641 }
5642
16119785
EG
5643 /* ensure status block indices were read */
5644 rmb();
5645
5646
5c862848
EG
5647 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5648 DEF_SB_ID);
5649 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5650 bnx2x_update_coalesce(bp);
5651 bnx2x_init_rx_rings(bp);
5652 bnx2x_init_tx_ring(bp);
5653 bnx2x_init_sp_ring(bp);
5654 bnx2x_init_context(bp);
471de716 5655 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5656 bnx2x_init_ind_table(bp);
0ef00459
EG
5657 bnx2x_stats_init(bp);
5658
5659 /* At this point, we are ready for interrupts */
5660 atomic_set(&bp->intr_sem, 0);
5661
5662 /* flush all before enabling interrupts */
5663 mb();
5664 mmiowb();
5665
615f8fd9 5666 bnx2x_int_enable(bp);
eb8da205
EG
5667
5668 /* Check for SPIO5 */
5669 bnx2x_attn_int_deasserted0(bp,
5670 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5671 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5672}
5673
5674/* end of nic init */
5675
5676/*
5677 * gzip service functions
5678 */
5679
5680static int bnx2x_gunzip_init(struct bnx2x *bp)
5681{
5682 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5683 &bp->gunzip_mapping);
5684 if (bp->gunzip_buf == NULL)
5685 goto gunzip_nomem1;
5686
5687 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5688 if (bp->strm == NULL)
5689 goto gunzip_nomem2;
5690
5691 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5692 GFP_KERNEL);
5693 if (bp->strm->workspace == NULL)
5694 goto gunzip_nomem3;
5695
5696 return 0;
5697
5698gunzip_nomem3:
5699 kfree(bp->strm);
5700 bp->strm = NULL;
5701
5702gunzip_nomem2:
5703 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5704 bp->gunzip_mapping);
5705 bp->gunzip_buf = NULL;
5706
5707gunzip_nomem1:
5708 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5709 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5710 return -ENOMEM;
5711}
5712
5713static void bnx2x_gunzip_end(struct bnx2x *bp)
5714{
5715 kfree(bp->strm->workspace);
5716
5717 kfree(bp->strm);
5718 bp->strm = NULL;
5719
5720 if (bp->gunzip_buf) {
5721 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5722 bp->gunzip_mapping);
5723 bp->gunzip_buf = NULL;
5724 }
5725}
5726
94a78b79 5727static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5728{
5729 int n, rc;
5730
5731 /* check gzip header */
94a78b79
VZ
5732 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5733 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5734 return -EINVAL;
94a78b79 5735 }
a2fbb9ea
ET
5736
5737 n = 10;
5738
34f80b04 5739#define FNAME 0x8
a2fbb9ea
ET
5740
5741 if (zbuf[3] & FNAME)
5742 while ((zbuf[n++] != 0) && (n < len));
5743
94a78b79 5744 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5745 bp->strm->avail_in = len - n;
5746 bp->strm->next_out = bp->gunzip_buf;
5747 bp->strm->avail_out = FW_BUF_SIZE;
5748
5749 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5750 if (rc != Z_OK)
5751 return rc;
5752
5753 rc = zlib_inflate(bp->strm, Z_FINISH);
5754 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5755 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5756 bp->dev->name, bp->strm->msg);
5757
5758 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5759 if (bp->gunzip_outlen & 0x3)
5760 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5761 " gunzip_outlen (%d) not aligned\n",
5762 bp->dev->name, bp->gunzip_outlen);
5763 bp->gunzip_outlen >>= 2;
5764
5765 zlib_inflateEnd(bp->strm);
5766
5767 if (rc == Z_STREAM_END)
5768 return 0;
5769
5770 return rc;
5771}
5772
5773/* nic load/unload */
5774
5775/*
34f80b04 5776 * General service functions
a2fbb9ea
ET
5777 */
5778
5779/* send a NIG loopback debug packet */
5780static void bnx2x_lb_pckt(struct bnx2x *bp)
5781{
a2fbb9ea 5782 u32 wb_write[3];
a2fbb9ea
ET
5783
5784 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5785 wb_write[0] = 0x55555555;
5786 wb_write[1] = 0x55555555;
34f80b04 5787 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5788 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5789
5790 /* NON-IP protocol */
a2fbb9ea
ET
5791 wb_write[0] = 0x09000000;
5792 wb_write[1] = 0x55555555;
34f80b04 5793 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5794 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5795}
5796
5797/* some of the internal memories
5798 * are not directly readable from the driver
5799 * to test them we send debug packets
5800 */
5801static int bnx2x_int_mem_test(struct bnx2x *bp)
5802{
5803 int factor;
5804 int count, i;
5805 u32 val = 0;
5806
ad8d3948 5807 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5808 factor = 120;
ad8d3948
EG
5809 else if (CHIP_REV_IS_EMUL(bp))
5810 factor = 200;
5811 else
a2fbb9ea 5812 factor = 1;
a2fbb9ea
ET
5813
5814 DP(NETIF_MSG_HW, "start part1\n");
5815
5816 /* Disable inputs of parser neighbor blocks */
5817 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5818 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5819 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5820 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5821
5822 /* Write 0 to parser credits for CFC search request */
5823 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5824
5825 /* send Ethernet packet */
5826 bnx2x_lb_pckt(bp);
5827
5828 /* TODO do i reset NIG statistic? */
5829 /* Wait until NIG register shows 1 packet of size 0x10 */
5830 count = 1000 * factor;
5831 while (count) {
34f80b04 5832
a2fbb9ea
ET
5833 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5834 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5835 if (val == 0x10)
5836 break;
5837
5838 msleep(10);
5839 count--;
5840 }
5841 if (val != 0x10) {
5842 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5843 return -1;
5844 }
5845
5846 /* Wait until PRS register shows 1 packet */
5847 count = 1000 * factor;
5848 while (count) {
5849 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5850 if (val == 1)
5851 break;
5852
5853 msleep(10);
5854 count--;
5855 }
5856 if (val != 0x1) {
5857 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5858 return -2;
5859 }
5860
5861 /* Reset and init BRB, PRS */
34f80b04 5862 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5863 msleep(50);
34f80b04 5864 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5865 msleep(50);
94a78b79
VZ
5866 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5867 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5868
5869 DP(NETIF_MSG_HW, "part2\n");
5870
5871 /* Disable inputs of parser neighbor blocks */
5872 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5873 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5874 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5875 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5876
5877 /* Write 0 to parser credits for CFC search request */
5878 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5879
5880 /* send 10 Ethernet packets */
5881 for (i = 0; i < 10; i++)
5882 bnx2x_lb_pckt(bp);
5883
5884 /* Wait until NIG register shows 10 + 1
5885 packets of size 11*0x10 = 0xb0 */
5886 count = 1000 * factor;
5887 while (count) {
34f80b04 5888
a2fbb9ea
ET
5889 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5890 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5891 if (val == 0xb0)
5892 break;
5893
5894 msleep(10);
5895 count--;
5896 }
5897 if (val != 0xb0) {
5898 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5899 return -3;
5900 }
5901
5902 /* Wait until PRS register shows 2 packets */
5903 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5904 if (val != 2)
5905 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5906
5907 /* Write 1 to parser credits for CFC search request */
5908 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5909
5910 /* Wait until PRS register shows 3 packets */
5911 msleep(10 * factor);
5912 /* Wait until NIG register shows 1 packet of size 0x10 */
5913 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5914 if (val != 3)
5915 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5916
5917 /* clear NIG EOP FIFO */
5918 for (i = 0; i < 11; i++)
5919 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5920 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5921 if (val != 1) {
5922 BNX2X_ERR("clear of NIG failed\n");
5923 return -4;
5924 }
5925
5926 /* Reset and init BRB, PRS, NIG */
5927 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5928 msleep(50);
5929 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5930 msleep(50);
94a78b79
VZ
5931 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5932 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 5933#ifndef BCM_CNIC
a2fbb9ea
ET
5934 /* set NIC mode */
5935 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5936#endif
5937
5938 /* Enable inputs of parser neighbor blocks */
5939 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5940 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5941 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5942 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5943
5944 DP(NETIF_MSG_HW, "done\n");
5945
5946 return 0; /* OK */
5947}
5948
5949static void enable_blocks_attention(struct bnx2x *bp)
5950{
5951 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5952 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5953 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5954 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5955 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5956 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5957 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5958 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5959 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5960/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5961/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5962 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5963 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5964 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5965/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5966/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5967 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5968 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5969 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5970 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5971/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5972/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5973 if (CHIP_REV_IS_FPGA(bp))
5974 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5975 else
5976 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5977 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5978 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5979 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5980/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5981/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5982 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5983 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5984/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5985 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5986}
5987
34f80b04 5988
81f75bbf
EG
5989static void bnx2x_reset_common(struct bnx2x *bp)
5990{
5991 /* reset_common */
5992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5993 0xd3ffff7f);
5994 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5995}
5996
573f2035
EG
5997static void bnx2x_init_pxp(struct bnx2x *bp)
5998{
5999 u16 devctl;
6000 int r_order, w_order;
6001
6002 pci_read_config_word(bp->pdev,
6003 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6004 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6005 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6006 if (bp->mrrs == -1)
6007 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6008 else {
6009 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6010 r_order = bp->mrrs;
6011 }
6012
6013 bnx2x_init_pxp_arb(bp, r_order, w_order);
6014}
fd4ef40d
EG
6015
6016static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6017{
6018 u32 val;
6019 u8 port;
6020 u8 is_required = 0;
6021
6022 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6023 SHARED_HW_CFG_FAN_FAILURE_MASK;
6024
6025 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6026 is_required = 1;
6027
6028 /*
6029 * The fan failure mechanism is usually related to the PHY type since
6030 * the power consumption of the board is affected by the PHY. Currently,
6031 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6032 */
6033 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6034 for (port = PORT_0; port < PORT_MAX; port++) {
6035 u32 phy_type =
6036 SHMEM_RD(bp, dev_info.port_hw_config[port].
6037 external_phy_config) &
6038 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6039 is_required |=
6040 ((phy_type ==
6041 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6042 (phy_type ==
6043 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6044 (phy_type ==
6045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6046 }
6047
6048 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6049
6050 if (is_required == 0)
6051 return;
6052
6053 /* Fan failure is indicated by SPIO 5 */
6054 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6055 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6056
6057 /* set to active low mode */
6058 val = REG_RD(bp, MISC_REG_SPIO_INT);
6059 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6060 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6061 REG_WR(bp, MISC_REG_SPIO_INT, val);
6062
6063 /* enable interrupt to signal the IGU */
6064 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6065 val |= (1 << MISC_REGISTERS_SPIO_5);
6066 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6067}
6068
34f80b04 6069static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6070{
a2fbb9ea 6071 u32 val, i;
37b091ba
MC
6072#ifdef BCM_CNIC
6073 u32 wb_write[2];
6074#endif
a2fbb9ea 6075
34f80b04 6076 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6077
81f75bbf 6078 bnx2x_reset_common(bp);
34f80b04
EG
6079 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6081
94a78b79 6082 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6083 if (CHIP_IS_E1H(bp))
6084 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6085
34f80b04
EG
6086 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6087 msleep(30);
6088 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6089
94a78b79 6090 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6091 if (CHIP_IS_E1(bp)) {
6092 /* enable HW interrupt from PXP on USDM overflow
6093 bit 16 on INT_MASK_0 */
6094 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6095 }
a2fbb9ea 6096
94a78b79 6097 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6098 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6099
6100#ifdef __BIG_ENDIAN
34f80b04
EG
6101 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6102 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6103 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6104 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6105 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6106 /* make sure this value is 0 */
6107 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6108
6109/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6110 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6111 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6112 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6113 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6114#endif
6115
34f80b04 6116 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6117#ifdef BCM_CNIC
34f80b04
EG
6118 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6119 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6120 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6121#endif
6122
34f80b04
EG
6123 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6124 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6125
34f80b04
EG
6126 /* let the HW do it's magic ... */
6127 msleep(100);
6128 /* finish PXP init */
6129 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6130 if (val != 1) {
6131 BNX2X_ERR("PXP2 CFG failed\n");
6132 return -EBUSY;
6133 }
6134 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6135 if (val != 1) {
6136 BNX2X_ERR("PXP2 RD_INIT failed\n");
6137 return -EBUSY;
6138 }
a2fbb9ea 6139
34f80b04
EG
6140 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6141 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6142
94a78b79 6143 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6144
34f80b04
EG
6145 /* clean the DMAE memory */
6146 bp->dmae_ready = 1;
6147 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6148
94a78b79
VZ
6149 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6150 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6151 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6152 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6153
34f80b04
EG
6154 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6155 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6156 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6157 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6158
94a78b79 6159 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6160
6161#ifdef BCM_CNIC
6162 wb_write[0] = 0;
6163 wb_write[1] = 0;
6164 for (i = 0; i < 64; i++) {
6165 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6166 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6167
6168 if (CHIP_IS_E1H(bp)) {
6169 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6170 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6171 wb_write, 2);
6172 }
6173 }
6174#endif
34f80b04
EG
6175 /* soft reset pulse */
6176 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6177 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6178
37b091ba 6179#ifdef BCM_CNIC
94a78b79 6180 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6181#endif
a2fbb9ea 6182
94a78b79 6183 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6184 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6185 if (!CHIP_REV_IS_SLOW(bp)) {
6186 /* enable hw interrupt from doorbell Q */
6187 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6188 }
a2fbb9ea 6189
94a78b79
VZ
6190 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6192 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6193#ifndef BCM_CNIC
3196a88a
EG
6194 /* set NIC mode */
6195 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6196#endif
34f80b04
EG
6197 if (CHIP_IS_E1H(bp))
6198 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6199
94a78b79
VZ
6200 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6201 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6202 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6203 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6204
ca00392c
EG
6205 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6206 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6207 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6208 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6209
94a78b79
VZ
6210 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6211 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6212 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6213 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6214
34f80b04
EG
6215 /* sync semi rtc */
6216 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6217 0x80000000);
6218 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6219 0x80000000);
a2fbb9ea 6220
94a78b79
VZ
6221 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6222 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6223 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6224
34f80b04
EG
6225 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6226 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6227 REG_WR(bp, i, 0xc0cac01a);
6228 /* TODO: replace with something meaningful */
6229 }
94a78b79 6230 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6231#ifdef BCM_CNIC
6232 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6233 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6234 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6235 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6236 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6237 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6238 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6239 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6240 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6241 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6242#endif
34f80b04 6243 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6244
34f80b04
EG
6245 if (sizeof(union cdu_context) != 1024)
6246 /* we currently assume that a context is 1024 bytes */
6247 printk(KERN_ALERT PFX "please adjust the size of"
6248 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6249
94a78b79 6250 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6251 val = (4 << 24) + (0 << 12) + 1024;
6252 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6253
94a78b79 6254 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6255 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6256 /* enable context validation interrupt from CFC */
6257 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6258
6259 /* set the thresholds to prevent CFC/CDU race */
6260 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6261
94a78b79
VZ
6262 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6263 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6264
94a78b79 6265 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6266 /* Reset PCIE errors for debug */
6267 REG_WR(bp, 0x2814, 0xffffffff);
6268 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6269
94a78b79 6270 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6271 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6272 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6273 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6274
94a78b79 6275 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6276 if (CHIP_IS_E1H(bp)) {
6277 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6278 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6279 }
6280
6281 if (CHIP_REV_IS_SLOW(bp))
6282 msleep(200);
6283
6284 /* finish CFC init */
6285 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6286 if (val != 1) {
6287 BNX2X_ERR("CFC LL_INIT failed\n");
6288 return -EBUSY;
6289 }
6290 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6291 if (val != 1) {
6292 BNX2X_ERR("CFC AC_INIT failed\n");
6293 return -EBUSY;
6294 }
6295 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6296 if (val != 1) {
6297 BNX2X_ERR("CFC CAM_INIT failed\n");
6298 return -EBUSY;
6299 }
6300 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6301
34f80b04
EG
6302 /* read NIG statistic
6303 to see if this is our first up since powerup */
6304 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6305 val = *bnx2x_sp(bp, wb_data[0]);
6306
6307 /* do internal memory self test */
6308 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6309 BNX2X_ERR("internal mem self test failed\n");
6310 return -EBUSY;
6311 }
6312
35b19ba5 6313 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6314 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6315 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6316 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6317 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6318 bp->port.need_hw_lock = 1;
6319 break;
6320
34f80b04
EG
6321 default:
6322 break;
6323 }
f1410647 6324
fd4ef40d
EG
6325 bnx2x_setup_fan_failure_detection(bp);
6326
34f80b04
EG
6327 /* clear PXP2 attentions */
6328 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6329
34f80b04 6330 enable_blocks_attention(bp);
a2fbb9ea 6331
6bbca910
YR
6332 if (!BP_NOMCP(bp)) {
6333 bnx2x_acquire_phy_lock(bp);
6334 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6335 bnx2x_release_phy_lock(bp);
6336 } else
6337 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6338
34f80b04
EG
6339 return 0;
6340}
a2fbb9ea 6341
34f80b04
EG
6342static int bnx2x_init_port(struct bnx2x *bp)
6343{
6344 int port = BP_PORT(bp);
94a78b79 6345 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6346 u32 low, high;
34f80b04 6347 u32 val;
a2fbb9ea 6348
34f80b04
EG
6349 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6350
6351 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6352
94a78b79 6353 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6354 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6355
6356 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6357 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6358 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6359 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6360
37b091ba
MC
6361#ifdef BCM_CNIC
6362 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6363
94a78b79 6364 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6365 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6366 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6367#endif
94a78b79 6368 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6369
94a78b79 6370 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6371 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6372 /* no pause for emulation and FPGA */
6373 low = 0;
6374 high = 513;
6375 } else {
6376 if (IS_E1HMF(bp))
6377 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6378 else if (bp->dev->mtu > 4096) {
6379 if (bp->flags & ONE_PORT_FLAG)
6380 low = 160;
6381 else {
6382 val = bp->dev->mtu;
6383 /* (24*1024 + val*4)/256 */
6384 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6385 }
6386 } else
6387 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6388 high = low + 56; /* 14*1024/256 */
6389 }
6390 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6391 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6392
6393
94a78b79 6394 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6395
94a78b79 6396 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6397 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6398 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6399 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6400
94a78b79
VZ
6401 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6402 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6403 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6404 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6405
94a78b79 6406 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6407 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6408
94a78b79 6409 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6410
6411 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6412 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6413
6414 /* update threshold */
34f80b04 6415 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6416 /* update init credit */
34f80b04 6417 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6418
6419 /* probe changes */
34f80b04 6420 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6421 msleep(5);
34f80b04 6422 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6423
37b091ba
MC
6424#ifdef BCM_CNIC
6425 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6426#endif
94a78b79 6427 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6428 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6429
6430 if (CHIP_IS_E1(bp)) {
6431 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6432 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6433 }
94a78b79 6434 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6435
94a78b79 6436 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6437 /* init aeu_mask_attn_func_0/1:
6438 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6439 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6440 * bits 4-7 are used for "per vn group attention" */
6441 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6442 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6443
94a78b79 6444 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6445 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6446 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6447 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6448 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6449
94a78b79 6450 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6451
6452 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6453
6454 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6455 /* 0x2 disable e1hov, 0x1 enable */
6456 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6457 (IS_E1HMF(bp) ? 0x1 : 0x2));
6458
1c06328c
EG
6459 {
6460 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6461 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6462 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6463 }
34f80b04
EG
6464 }
6465
94a78b79 6466 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6467 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6468
35b19ba5 6469 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6470 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6471 {
6472 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6473
6474 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6475 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6476
6477 /* The GPIO should be swapped if the swap register is
6478 set and active */
6479 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6480 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6481
6482 /* Select function upon port-swap configuration */
6483 if (port == 0) {
6484 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6485 aeu_gpio_mask = (swap_val && swap_override) ?
6486 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6487 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6488 } else {
6489 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6490 aeu_gpio_mask = (swap_val && swap_override) ?
6491 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6492 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6493 }
6494 val = REG_RD(bp, offset);
6495 /* add GPIO3 to group */
6496 val |= aeu_gpio_mask;
6497 REG_WR(bp, offset, val);
6498 }
6499 break;
6500
35b19ba5 6501 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6502 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6503 /* add SPIO 5 to group 0 */
4d295db0
EG
6504 {
6505 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6506 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6507 val = REG_RD(bp, reg_addr);
f1410647 6508 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6509 REG_WR(bp, reg_addr, val);
6510 }
f1410647
ET
6511 break;
6512
6513 default:
6514 break;
6515 }
6516
c18487ee 6517 bnx2x__link_reset(bp);
a2fbb9ea 6518
34f80b04
EG
6519 return 0;
6520}
6521
6522#define ILT_PER_FUNC (768/2)
6523#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6524/* the phys address is shifted right 12 bits and has an added
6525 1=valid bit added to the 53rd bit
6526 then since this is a wide register(TM)
6527 we split it into two 32 bit writes
6528 */
6529#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6530#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6531#define PXP_ONE_ILT(x) (((x) << 10) | x)
6532#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6533
37b091ba
MC
6534#ifdef BCM_CNIC
6535#define CNIC_ILT_LINES 127
6536#define CNIC_CTX_PER_ILT 16
6537#else
34f80b04 6538#define CNIC_ILT_LINES 0
37b091ba 6539#endif
34f80b04
EG
6540
6541static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6542{
6543 int reg;
6544
6545 if (CHIP_IS_E1H(bp))
6546 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6547 else /* E1 */
6548 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6549
6550 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6551}
6552
6553static int bnx2x_init_func(struct bnx2x *bp)
6554{
6555 int port = BP_PORT(bp);
6556 int func = BP_FUNC(bp);
8badd27a 6557 u32 addr, val;
34f80b04
EG
6558 int i;
6559
6560 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6561
8badd27a
EG
6562 /* set MSI reconfigure capability */
6563 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6564 val = REG_RD(bp, addr);
6565 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6566 REG_WR(bp, addr, val);
6567
34f80b04
EG
6568 i = FUNC_ILT_BASE(func);
6569
6570 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6571 if (CHIP_IS_E1H(bp)) {
6572 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6573 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6574 } else /* E1 */
6575 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6576 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6577
37b091ba
MC
6578#ifdef BCM_CNIC
6579 i += 1 + CNIC_ILT_LINES;
6580 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6581 if (CHIP_IS_E1(bp))
6582 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6583 else {
6584 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6585 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6586 }
6587
6588 i++;
6589 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6590 if (CHIP_IS_E1(bp))
6591 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6592 else {
6593 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6594 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6595 }
6596
6597 i++;
6598 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6599 if (CHIP_IS_E1(bp))
6600 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6601 else {
6602 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6603 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6604 }
6605
6606 /* tell the searcher where the T2 table is */
6607 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6608
6609 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6610 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6611
6612 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6613 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6614 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6615
6616 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6617#endif
34f80b04
EG
6618
6619 if (CHIP_IS_E1H(bp)) {
573f2035
EG
6620 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6621 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6622 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6623 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6624 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6625 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6626 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6627 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6628 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6629
6630 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6631 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6632 }
6633
6634 /* HC init per function */
6635 if (CHIP_IS_E1H(bp)) {
6636 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6637
6638 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6639 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6640 }
94a78b79 6641 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6642
c14423fe 6643 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6644 REG_WR(bp, 0x2114, 0xffffffff);
6645 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6646
34f80b04
EG
6647 return 0;
6648}
6649
6650static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6651{
6652 int i, rc = 0;
a2fbb9ea 6653
34f80b04
EG
6654 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6655 BP_FUNC(bp), load_code);
a2fbb9ea 6656
34f80b04
EG
6657 bp->dmae_ready = 0;
6658 mutex_init(&bp->dmae_mutex);
54016b26
EG
6659 rc = bnx2x_gunzip_init(bp);
6660 if (rc)
6661 return rc;
a2fbb9ea 6662
34f80b04
EG
6663 switch (load_code) {
6664 case FW_MSG_CODE_DRV_LOAD_COMMON:
6665 rc = bnx2x_init_common(bp);
6666 if (rc)
6667 goto init_hw_err;
6668 /* no break */
6669
6670 case FW_MSG_CODE_DRV_LOAD_PORT:
6671 bp->dmae_ready = 1;
6672 rc = bnx2x_init_port(bp);
6673 if (rc)
6674 goto init_hw_err;
6675 /* no break */
6676
6677 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6678 bp->dmae_ready = 1;
6679 rc = bnx2x_init_func(bp);
6680 if (rc)
6681 goto init_hw_err;
6682 break;
6683
6684 default:
6685 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6686 break;
6687 }
6688
6689 if (!BP_NOMCP(bp)) {
6690 int func = BP_FUNC(bp);
a2fbb9ea
ET
6691
6692 bp->fw_drv_pulse_wr_seq =
34f80b04 6693 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6694 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6695 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6696 }
a2fbb9ea 6697
34f80b04
EG
6698 /* this needs to be done before gunzip end */
6699 bnx2x_zero_def_sb(bp);
6700 for_each_queue(bp, i)
6701 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
37b091ba
MC
6702#ifdef BCM_CNIC
6703 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6704#endif
34f80b04
EG
6705
6706init_hw_err:
6707 bnx2x_gunzip_end(bp);
6708
6709 return rc;
a2fbb9ea
ET
6710}
6711
a2fbb9ea
ET
6712static void bnx2x_free_mem(struct bnx2x *bp)
6713{
6714
6715#define BNX2X_PCI_FREE(x, y, size) \
6716 do { \
6717 if (x) { \
6718 pci_free_consistent(bp->pdev, size, x, y); \
6719 x = NULL; \
6720 y = 0; \
6721 } \
6722 } while (0)
6723
6724#define BNX2X_FREE(x) \
6725 do { \
6726 if (x) { \
6727 vfree(x); \
6728 x = NULL; \
6729 } \
6730 } while (0)
6731
6732 int i;
6733
6734 /* fastpath */
555f6c78 6735 /* Common */
a2fbb9ea
ET
6736 for_each_queue(bp, i) {
6737
555f6c78 6738 /* status blocks */
a2fbb9ea
ET
6739 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6740 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6741 sizeof(struct host_status_block));
555f6c78
EG
6742 }
6743 /* Rx */
6744 for_each_rx_queue(bp, i) {
a2fbb9ea 6745
555f6c78 6746 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6747 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6748 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6749 bnx2x_fp(bp, i, rx_desc_mapping),
6750 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6751
6752 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6753 bnx2x_fp(bp, i, rx_comp_mapping),
6754 sizeof(struct eth_fast_path_rx_cqe) *
6755 NUM_RCQ_BD);
a2fbb9ea 6756
7a9b2557 6757 /* SGE ring */
32626230 6758 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6760 bnx2x_fp(bp, i, rx_sge_mapping),
6761 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6762 }
555f6c78
EG
6763 /* Tx */
6764 for_each_tx_queue(bp, i) {
6765
6766 /* fastpath tx rings: tx_buf tx_desc */
6767 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6768 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6769 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6770 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6771 }
a2fbb9ea
ET
6772 /* end of fastpath */
6773
6774 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6775 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6776
6777 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6778 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6779
37b091ba 6780#ifdef BCM_CNIC
a2fbb9ea
ET
6781 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6782 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6783 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6784 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
37b091ba
MC
6785 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6786 sizeof(struct host_status_block));
a2fbb9ea 6787#endif
7a9b2557 6788 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6789
6790#undef BNX2X_PCI_FREE
6791#undef BNX2X_KFREE
6792}
6793
6794static int bnx2x_alloc_mem(struct bnx2x *bp)
6795{
6796
6797#define BNX2X_PCI_ALLOC(x, y, size) \
6798 do { \
6799 x = pci_alloc_consistent(bp->pdev, size, y); \
6800 if (x == NULL) \
6801 goto alloc_mem_err; \
6802 memset(x, 0, size); \
6803 } while (0)
6804
6805#define BNX2X_ALLOC(x, size) \
6806 do { \
6807 x = vmalloc(size); \
6808 if (x == NULL) \
6809 goto alloc_mem_err; \
6810 memset(x, 0, size); \
6811 } while (0)
6812
6813 int i;
6814
6815 /* fastpath */
555f6c78 6816 /* Common */
a2fbb9ea
ET
6817 for_each_queue(bp, i) {
6818 bnx2x_fp(bp, i, bp) = bp;
6819
555f6c78 6820 /* status blocks */
a2fbb9ea
ET
6821 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6822 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6823 sizeof(struct host_status_block));
555f6c78
EG
6824 }
6825 /* Rx */
6826 for_each_rx_queue(bp, i) {
a2fbb9ea 6827
555f6c78 6828 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6829 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6830 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6831 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6832 &bnx2x_fp(bp, i, rx_desc_mapping),
6833 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6834
6835 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6836 &bnx2x_fp(bp, i, rx_comp_mapping),
6837 sizeof(struct eth_fast_path_rx_cqe) *
6838 NUM_RCQ_BD);
6839
7a9b2557
VZ
6840 /* SGE ring */
6841 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6842 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6843 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6844 &bnx2x_fp(bp, i, rx_sge_mapping),
6845 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6846 }
555f6c78
EG
6847 /* Tx */
6848 for_each_tx_queue(bp, i) {
6849
555f6c78
EG
6850 /* fastpath tx rings: tx_buf tx_desc */
6851 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6852 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6853 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6854 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6855 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6856 }
a2fbb9ea
ET
6857 /* end of fastpath */
6858
6859 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6860 sizeof(struct host_def_status_block));
6861
6862 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6863 sizeof(struct bnx2x_slowpath));
6864
37b091ba 6865#ifdef BCM_CNIC
a2fbb9ea
ET
6866 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6867
a2fbb9ea
ET
6868 /* allocate searcher T2 table
6869 we allocate 1/4 of alloc num for T2
6870 (which is not entered into the ILT) */
6871 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6872
37b091ba 6873 /* Initialize T2 (for 1024 connections) */
a2fbb9ea 6874 for (i = 0; i < 16*1024; i += 64)
37b091ba 6875 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
a2fbb9ea 6876
37b091ba 6877 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
a2fbb9ea
ET
6878 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6879
6880 /* QM queues (128*MAX_CONN) */
6881 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
37b091ba
MC
6882
6883 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6884 sizeof(struct host_status_block));
a2fbb9ea
ET
6885#endif
6886
6887 /* Slow path ring */
6888 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6889
6890 return 0;
6891
6892alloc_mem_err:
6893 bnx2x_free_mem(bp);
6894 return -ENOMEM;
6895
6896#undef BNX2X_PCI_ALLOC
6897#undef BNX2X_ALLOC
6898}
6899
6900static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6901{
6902 int i;
6903
555f6c78 6904 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6905 struct bnx2x_fastpath *fp = &bp->fp[i];
6906
6907 u16 bd_cons = fp->tx_bd_cons;
6908 u16 sw_prod = fp->tx_pkt_prod;
6909 u16 sw_cons = fp->tx_pkt_cons;
6910
a2fbb9ea
ET
6911 while (sw_cons != sw_prod) {
6912 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6913 sw_cons++;
6914 }
6915 }
6916}
6917
6918static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6919{
6920 int i, j;
6921
555f6c78 6922 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6923 struct bnx2x_fastpath *fp = &bp->fp[j];
6924
a2fbb9ea
ET
6925 for (i = 0; i < NUM_RX_BD; i++) {
6926 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6927 struct sk_buff *skb = rx_buf->skb;
6928
6929 if (skb == NULL)
6930 continue;
6931
6932 pci_unmap_single(bp->pdev,
6933 pci_unmap_addr(rx_buf, mapping),
356e2385 6934 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6935
6936 rx_buf->skb = NULL;
6937 dev_kfree_skb(skb);
6938 }
7a9b2557 6939 if (!fp->disable_tpa)
32626230
EG
6940 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6941 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6942 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6943 }
6944}
6945
6946static void bnx2x_free_skbs(struct bnx2x *bp)
6947{
6948 bnx2x_free_tx_skbs(bp);
6949 bnx2x_free_rx_skbs(bp);
6950}
6951
6952static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6953{
34f80b04 6954 int i, offset = 1;
a2fbb9ea
ET
6955
6956 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6957 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6958 bp->msix_table[0].vector);
6959
37b091ba
MC
6960#ifdef BCM_CNIC
6961 offset++;
6962#endif
a2fbb9ea 6963 for_each_queue(bp, i) {
c14423fe 6964 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6965 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6966 bnx2x_fp(bp, i, state));
6967
34f80b04 6968 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6969 }
a2fbb9ea
ET
6970}
6971
6972static void bnx2x_free_irq(struct bnx2x *bp)
6973{
a2fbb9ea 6974 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6975 bnx2x_free_msix_irqs(bp);
6976 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6977 bp->flags &= ~USING_MSIX_FLAG;
6978
8badd27a
EG
6979 } else if (bp->flags & USING_MSI_FLAG) {
6980 free_irq(bp->pdev->irq, bp->dev);
6981 pci_disable_msi(bp->pdev);
6982 bp->flags &= ~USING_MSI_FLAG;
6983
a2fbb9ea
ET
6984 } else
6985 free_irq(bp->pdev->irq, bp->dev);
6986}
6987
6988static int bnx2x_enable_msix(struct bnx2x *bp)
6989{
8badd27a
EG
6990 int i, rc, offset = 1;
6991 int igu_vec = 0;
a2fbb9ea 6992
8badd27a
EG
6993 bp->msix_table[0].entry = igu_vec;
6994 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6995
37b091ba
MC
6996#ifdef BCM_CNIC
6997 igu_vec = BP_L_ID(bp) + offset;
6998 bp->msix_table[1].entry = igu_vec;
6999 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7000 offset++;
7001#endif
34f80b04 7002 for_each_queue(bp, i) {
8badd27a 7003 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
7004 bp->msix_table[i + offset].entry = igu_vec;
7005 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7006 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
7007 }
7008
34f80b04 7009 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 7010 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 7011 if (rc) {
8badd27a
EG
7012 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7013 return rc;
34f80b04 7014 }
8badd27a 7015
a2fbb9ea
ET
7016 bp->flags |= USING_MSIX_FLAG;
7017
7018 return 0;
a2fbb9ea
ET
7019}
7020
a2fbb9ea
ET
7021static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7022{
34f80b04 7023 int i, rc, offset = 1;
a2fbb9ea 7024
a2fbb9ea
ET
7025 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7026 bp->dev->name, bp->dev);
a2fbb9ea
ET
7027 if (rc) {
7028 BNX2X_ERR("request sp irq failed\n");
7029 return -EBUSY;
7030 }
7031
37b091ba
MC
7032#ifdef BCM_CNIC
7033 offset++;
7034#endif
a2fbb9ea 7035 for_each_queue(bp, i) {
555f6c78
EG
7036 struct bnx2x_fastpath *fp = &bp->fp[i];
7037
ca00392c
EG
7038 if (i < bp->num_rx_queues)
7039 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7040 else
7041 sprintf(fp->name, "%s-tx-%d",
7042 bp->dev->name, i - bp->num_rx_queues);
7043
34f80b04 7044 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 7045 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 7046 if (rc) {
555f6c78 7047 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
7048 bnx2x_free_msix_irqs(bp);
7049 return -EBUSY;
7050 }
7051
555f6c78 7052 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
7053 }
7054
555f6c78 7055 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
7056 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
7057 " ... fp[%d] %d\n",
7058 bp->dev->name, bp->msix_table[0].vector,
7059 0, bp->msix_table[offset].vector,
7060 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 7061
a2fbb9ea 7062 return 0;
a2fbb9ea
ET
7063}
7064
8badd27a
EG
7065static int bnx2x_enable_msi(struct bnx2x *bp)
7066{
7067 int rc;
7068
7069 rc = pci_enable_msi(bp->pdev);
7070 if (rc) {
7071 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7072 return -1;
7073 }
7074 bp->flags |= USING_MSI_FLAG;
7075
7076 return 0;
7077}
7078
a2fbb9ea
ET
7079static int bnx2x_req_irq(struct bnx2x *bp)
7080{
8badd27a 7081 unsigned long flags;
34f80b04 7082 int rc;
a2fbb9ea 7083
8badd27a
EG
7084 if (bp->flags & USING_MSI_FLAG)
7085 flags = 0;
7086 else
7087 flags = IRQF_SHARED;
7088
7089 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 7090 bp->dev->name, bp->dev);
a2fbb9ea
ET
7091 if (!rc)
7092 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7093
7094 return rc;
a2fbb9ea
ET
7095}
7096
65abd74d
YG
7097static void bnx2x_napi_enable(struct bnx2x *bp)
7098{
7099 int i;
7100
555f6c78 7101 for_each_rx_queue(bp, i)
65abd74d
YG
7102 napi_enable(&bnx2x_fp(bp, i, napi));
7103}
7104
7105static void bnx2x_napi_disable(struct bnx2x *bp)
7106{
7107 int i;
7108
555f6c78 7109 for_each_rx_queue(bp, i)
65abd74d
YG
7110 napi_disable(&bnx2x_fp(bp, i, napi));
7111}
7112
7113static void bnx2x_netif_start(struct bnx2x *bp)
7114{
e1510706
EG
7115 int intr_sem;
7116
7117 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7118 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7119
7120 if (intr_sem) {
65abd74d 7121 if (netif_running(bp->dev)) {
65abd74d
YG
7122 bnx2x_napi_enable(bp);
7123 bnx2x_int_enable(bp);
555f6c78
EG
7124 if (bp->state == BNX2X_STATE_OPEN)
7125 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
7126 }
7127 }
7128}
7129
f8ef6e44 7130static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7131{
f8ef6e44 7132 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7133 bnx2x_napi_disable(bp);
762d5f6c
EG
7134 netif_tx_disable(bp->dev);
7135 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
7136}
7137
a2fbb9ea
ET
7138/*
7139 * Init service functions
7140 */
7141
e665bfda
MC
7142/**
7143 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7144 *
7145 * @param bp driver descriptor
7146 * @param set set or clear an entry (1 or 0)
7147 * @param mac pointer to a buffer containing a MAC
7148 * @param cl_bit_vec bit vector of clients to register a MAC for
7149 * @param cam_offset offset in a CAM to use
7150 * @param with_bcast set broadcast MAC as well
7151 */
7152static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7153 u32 cl_bit_vec, u8 cam_offset,
7154 u8 with_bcast)
a2fbb9ea
ET
7155{
7156 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7157 int port = BP_PORT(bp);
a2fbb9ea
ET
7158
7159 /* CAM allocation
7160 * unicasts 0-31:port0 32-63:port1
7161 * multicast 64-127:port0 128-191:port1
7162 */
e665bfda
MC
7163 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7164 config->hdr.offset = cam_offset;
7165 config->hdr.client_id = 0xff;
a2fbb9ea
ET
7166 config->hdr.reserved1 = 0;
7167
7168 /* primary MAC */
7169 config->config_table[0].cam_entry.msb_mac_addr =
e665bfda 7170 swab16(*(u16 *)&mac[0]);
a2fbb9ea 7171 config->config_table[0].cam_entry.middle_mac_addr =
e665bfda 7172 swab16(*(u16 *)&mac[2]);
a2fbb9ea 7173 config->config_table[0].cam_entry.lsb_mac_addr =
e665bfda 7174 swab16(*(u16 *)&mac[4]);
34f80b04 7175 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7176 if (set)
7177 config->config_table[0].target_table_entry.flags = 0;
7178 else
7179 CAM_INVALIDATE(config->config_table[0]);
ca00392c 7180 config->config_table[0].target_table_entry.clients_bit_vector =
e665bfda 7181 cpu_to_le32(cl_bit_vec);
a2fbb9ea
ET
7182 config->config_table[0].target_table_entry.vlan_id = 0;
7183
3101c2bc
YG
7184 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7185 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7186 config->config_table[0].cam_entry.msb_mac_addr,
7187 config->config_table[0].cam_entry.middle_mac_addr,
7188 config->config_table[0].cam_entry.lsb_mac_addr);
7189
7190 /* broadcast */
e665bfda
MC
7191 if (with_bcast) {
7192 config->config_table[1].cam_entry.msb_mac_addr =
7193 cpu_to_le16(0xffff);
7194 config->config_table[1].cam_entry.middle_mac_addr =
7195 cpu_to_le16(0xffff);
7196 config->config_table[1].cam_entry.lsb_mac_addr =
7197 cpu_to_le16(0xffff);
7198 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7199 if (set)
7200 config->config_table[1].target_table_entry.flags =
7201 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7202 else
7203 CAM_INVALIDATE(config->config_table[1]);
7204 config->config_table[1].target_table_entry.clients_bit_vector =
7205 cpu_to_le32(cl_bit_vec);
7206 config->config_table[1].target_table_entry.vlan_id = 0;
7207 }
a2fbb9ea
ET
7208
7209 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7210 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7211 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7212}
7213
e665bfda
MC
7214/**
7215 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7216 *
7217 * @param bp driver descriptor
7218 * @param set set or clear an entry (1 or 0)
7219 * @param mac pointer to a buffer containing a MAC
7220 * @param cl_bit_vec bit vector of clients to register a MAC for
7221 * @param cam_offset offset in a CAM to use
7222 */
7223static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7224 u32 cl_bit_vec, u8 cam_offset)
34f80b04
EG
7225{
7226 struct mac_configuration_cmd_e1h *config =
7227 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7228
8d9c5f34 7229 config->hdr.length = 1;
e665bfda
MC
7230 config->hdr.offset = cam_offset;
7231 config->hdr.client_id = 0xff;
34f80b04
EG
7232 config->hdr.reserved1 = 0;
7233
7234 /* primary MAC */
7235 config->config_table[0].msb_mac_addr =
e665bfda 7236 swab16(*(u16 *)&mac[0]);
34f80b04 7237 config->config_table[0].middle_mac_addr =
e665bfda 7238 swab16(*(u16 *)&mac[2]);
34f80b04 7239 config->config_table[0].lsb_mac_addr =
e665bfda 7240 swab16(*(u16 *)&mac[4]);
ca00392c 7241 config->config_table[0].clients_bit_vector =
e665bfda 7242 cpu_to_le32(cl_bit_vec);
34f80b04
EG
7243 config->config_table[0].vlan_id = 0;
7244 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7245 if (set)
7246 config->config_table[0].flags = BP_PORT(bp);
7247 else
7248 config->config_table[0].flags =
7249 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7250
e665bfda 7251 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
3101c2bc 7252 (set ? "setting" : "clearing"),
34f80b04
EG
7253 config->config_table[0].msb_mac_addr,
7254 config->config_table[0].middle_mac_addr,
e665bfda 7255 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
34f80b04
EG
7256
7257 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7258 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7259 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7260}
7261
a2fbb9ea
ET
7262static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7263 int *state_p, int poll)
7264{
7265 /* can take a while if any port is running */
8b3a0f0b 7266 int cnt = 5000;
a2fbb9ea 7267
c14423fe
ET
7268 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7269 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7270
7271 might_sleep();
34f80b04 7272 while (cnt--) {
a2fbb9ea
ET
7273 if (poll) {
7274 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7275 /* if index is different from 0
7276 * the reply for some commands will
3101c2bc 7277 * be on the non default queue
a2fbb9ea
ET
7278 */
7279 if (idx)
7280 bnx2x_rx_int(&bp->fp[idx], 10);
7281 }
a2fbb9ea 7282
3101c2bc 7283 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7284 if (*state_p == state) {
7285#ifdef BNX2X_STOP_ON_ERROR
7286 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7287#endif
a2fbb9ea 7288 return 0;
8b3a0f0b 7289 }
a2fbb9ea 7290
a2fbb9ea 7291 msleep(1);
e3553b29
EG
7292
7293 if (bp->panic)
7294 return -EIO;
a2fbb9ea
ET
7295 }
7296
a2fbb9ea 7297 /* timeout! */
49d66772
ET
7298 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7299 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7300#ifdef BNX2X_STOP_ON_ERROR
7301 bnx2x_panic();
7302#endif
a2fbb9ea 7303
49d66772 7304 return -EBUSY;
a2fbb9ea
ET
7305}
7306
e665bfda
MC
7307static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7308{
7309 bp->set_mac_pending++;
7310 smp_wmb();
7311
7312 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7313 (1 << bp->fp->cl_id), BP_FUNC(bp));
7314
7315 /* Wait for a completion */
7316 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7317}
7318
7319static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7320{
7321 bp->set_mac_pending++;
7322 smp_wmb();
7323
7324 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7325 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7326 1);
7327
7328 /* Wait for a completion */
7329 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7330}
7331
993ac7b5
MC
7332#ifdef BCM_CNIC
7333/**
7334 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7335 * MAC(s). This function will wait until the ramdord completion
7336 * returns.
7337 *
7338 * @param bp driver handle
7339 * @param set set or clear the CAM entry
7340 *
7341 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7342 */
7343static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7344{
7345 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7346
7347 bp->set_mac_pending++;
7348 smp_wmb();
7349
7350 /* Send a SET_MAC ramrod */
7351 if (CHIP_IS_E1(bp))
7352 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7353 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7354 1);
7355 else
7356 /* CAM allocation for E1H
7357 * unicasts: by func number
7358 * multicast: 20+FUNC*20, 20 each
7359 */
7360 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7361 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7362
7363 /* Wait for a completion when setting */
7364 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7365
7366 return 0;
7367}
7368#endif
7369
a2fbb9ea
ET
7370static int bnx2x_setup_leading(struct bnx2x *bp)
7371{
34f80b04 7372 int rc;
a2fbb9ea 7373
c14423fe 7374 /* reset IGU state */
34f80b04 7375 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7376
7377 /* SETUP ramrod */
7378 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7379
34f80b04
EG
7380 /* Wait for completion */
7381 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7382
34f80b04 7383 return rc;
a2fbb9ea
ET
7384}
7385
7386static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7387{
555f6c78
EG
7388 struct bnx2x_fastpath *fp = &bp->fp[index];
7389
a2fbb9ea 7390 /* reset IGU state */
555f6c78 7391 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7392
228241eb 7393 /* SETUP ramrod */
555f6c78
EG
7394 fp->state = BNX2X_FP_STATE_OPENING;
7395 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7396 fp->cl_id, 0);
a2fbb9ea
ET
7397
7398 /* Wait for completion */
7399 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7400 &(fp->state), 0);
a2fbb9ea
ET
7401}
7402
a2fbb9ea 7403static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7404
ca00392c
EG
7405static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7406 int *num_tx_queues_out)
7407{
7408 int _num_rx_queues = 0, _num_tx_queues = 0;
7409
7410 switch (bp->multi_mode) {
7411 case ETH_RSS_MODE_DISABLED:
7412 _num_rx_queues = 1;
7413 _num_tx_queues = 1;
7414 break;
7415
7416 case ETH_RSS_MODE_REGULAR:
7417 if (num_rx_queues)
7418 _num_rx_queues = min_t(u32, num_rx_queues,
7419 BNX2X_MAX_QUEUES(bp));
7420 else
7421 _num_rx_queues = min_t(u32, num_online_cpus(),
7422 BNX2X_MAX_QUEUES(bp));
7423
7424 if (num_tx_queues)
7425 _num_tx_queues = min_t(u32, num_tx_queues,
7426 BNX2X_MAX_QUEUES(bp));
7427 else
7428 _num_tx_queues = min_t(u32, num_online_cpus(),
7429 BNX2X_MAX_QUEUES(bp));
7430
7431 /* There must be not more Tx queues than Rx queues */
7432 if (_num_tx_queues > _num_rx_queues) {
7433 BNX2X_ERR("number of tx queues (%d) > "
7434 "number of rx queues (%d)"
7435 " defaulting to %d\n",
7436 _num_tx_queues, _num_rx_queues,
7437 _num_rx_queues);
7438 _num_tx_queues = _num_rx_queues;
7439 }
7440 break;
7441
7442
7443 default:
7444 _num_rx_queues = 1;
7445 _num_tx_queues = 1;
7446 break;
7447 }
7448
7449 *num_rx_queues_out = _num_rx_queues;
7450 *num_tx_queues_out = _num_tx_queues;
7451}
7452
7453static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7454{
ca00392c 7455 int rc = 0;
a2fbb9ea 7456
8badd27a
EG
7457 switch (int_mode) {
7458 case INT_MODE_INTx:
7459 case INT_MODE_MSI:
ca00392c
EG
7460 bp->num_rx_queues = 1;
7461 bp->num_tx_queues = 1;
7462 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7463 break;
7464
7465 case INT_MODE_MSIX:
7466 default:
ca00392c
EG
7467 /* Set interrupt mode according to bp->multi_mode value */
7468 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7469 &bp->num_tx_queues);
7470
7471 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7472 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7473
2dfe0e1f
EG
7474 /* if we can't use MSI-X we only need one fp,
7475 * so try to enable MSI-X with the requested number of fp's
7476 * and fallback to MSI or legacy INTx with one fp
7477 */
ca00392c
EG
7478 rc = bnx2x_enable_msix(bp);
7479 if (rc) {
34f80b04 7480 /* failed to enable MSI-X */
555f6c78
EG
7481 if (bp->multi_mode)
7482 BNX2X_ERR("Multi requested but failed to "
ca00392c
EG
7483 "enable MSI-X (rx %d tx %d), "
7484 "set number of queues to 1\n",
7485 bp->num_rx_queues, bp->num_tx_queues);
7486 bp->num_rx_queues = 1;
7487 bp->num_tx_queues = 1;
a2fbb9ea 7488 }
8badd27a 7489 break;
a2fbb9ea 7490 }
555f6c78 7491 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7492 return rc;
8badd27a
EG
7493}
7494
993ac7b5
MC
7495#ifdef BCM_CNIC
7496static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7497static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7498#endif
8badd27a
EG
7499
7500/* must be called with rtnl_lock */
7501static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7502{
7503 u32 load_code;
ca00392c
EG
7504 int i, rc;
7505
8badd27a 7506#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7507 if (unlikely(bp->panic))
7508 return -EPERM;
7509#endif
7510
7511 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7512
ca00392c 7513 rc = bnx2x_set_int_mode(bp);
c14423fe 7514
a2fbb9ea
ET
7515 if (bnx2x_alloc_mem(bp))
7516 return -ENOMEM;
7517
555f6c78 7518 for_each_rx_queue(bp, i)
7a9b2557
VZ
7519 bnx2x_fp(bp, i, disable_tpa) =
7520 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7521
555f6c78 7522 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7523 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7524 bnx2x_poll, 128);
7525
2dfe0e1f
EG
7526 bnx2x_napi_enable(bp);
7527
34f80b04
EG
7528 if (bp->flags & USING_MSIX_FLAG) {
7529 rc = bnx2x_req_msix_irqs(bp);
7530 if (rc) {
7531 pci_disable_msix(bp->pdev);
2dfe0e1f 7532 goto load_error1;
34f80b04
EG
7533 }
7534 } else {
ca00392c
EG
7535 /* Fall to INTx if failed to enable MSI-X due to lack of
7536 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7537 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7538 bnx2x_enable_msi(bp);
34f80b04
EG
7539 bnx2x_ack_int(bp);
7540 rc = bnx2x_req_irq(bp);
7541 if (rc) {
2dfe0e1f 7542 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7543 if (bp->flags & USING_MSI_FLAG)
7544 pci_disable_msi(bp->pdev);
2dfe0e1f 7545 goto load_error1;
a2fbb9ea 7546 }
8badd27a
EG
7547 if (bp->flags & USING_MSI_FLAG) {
7548 bp->dev->irq = bp->pdev->irq;
7549 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7550 bp->dev->name, bp->pdev->irq);
7551 }
a2fbb9ea
ET
7552 }
7553
2dfe0e1f
EG
7554 /* Send LOAD_REQUEST command to MCP
7555 Returns the type of LOAD command:
7556 if it is the first port to be initialized
7557 common blocks should be initialized, otherwise - not
7558 */
7559 if (!BP_NOMCP(bp)) {
7560 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7561 if (!load_code) {
7562 BNX2X_ERR("MCP response failure, aborting\n");
7563 rc = -EBUSY;
7564 goto load_error2;
7565 }
7566 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7567 rc = -EBUSY; /* other port in diagnostic mode */
7568 goto load_error2;
7569 }
7570
7571 } else {
7572 int port = BP_PORT(bp);
7573
f5372251 7574 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7575 load_count[0], load_count[1], load_count[2]);
7576 load_count[0]++;
7577 load_count[1 + port]++;
f5372251 7578 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7579 load_count[0], load_count[1], load_count[2]);
7580 if (load_count[0] == 1)
7581 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7582 else if (load_count[1 + port] == 1)
7583 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7584 else
7585 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7586 }
7587
7588 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7589 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7590 bp->port.pmf = 1;
7591 else
7592 bp->port.pmf = 0;
7593 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7594
a2fbb9ea 7595 /* Initialize HW */
34f80b04
EG
7596 rc = bnx2x_init_hw(bp, load_code);
7597 if (rc) {
a2fbb9ea 7598 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7599 goto load_error2;
a2fbb9ea
ET
7600 }
7601
a2fbb9ea 7602 /* Setup NIC internals and enable interrupts */
471de716 7603 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7604
2691d51d
EG
7605 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7606 (bp->common.shmem2_base))
7607 SHMEM2_WR(bp, dcc_support,
7608 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7609 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7610
a2fbb9ea 7611 /* Send LOAD_DONE command to MCP */
34f80b04 7612 if (!BP_NOMCP(bp)) {
228241eb
ET
7613 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7614 if (!load_code) {
da5a662a 7615 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7616 rc = -EBUSY;
2dfe0e1f 7617 goto load_error3;
a2fbb9ea
ET
7618 }
7619 }
7620
7621 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7622
34f80b04
EG
7623 rc = bnx2x_setup_leading(bp);
7624 if (rc) {
da5a662a 7625 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7626#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7627 goto load_error3;
e3553b29
EG
7628#else
7629 bp->panic = 1;
7630 return -EBUSY;
7631#endif
34f80b04 7632 }
a2fbb9ea 7633
34f80b04
EG
7634 if (CHIP_IS_E1H(bp))
7635 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7636 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
f34d28ea 7637 bp->flags |= MF_FUNC_DIS;
34f80b04 7638 }
a2fbb9ea 7639
ca00392c 7640 if (bp->state == BNX2X_STATE_OPEN) {
37b091ba
MC
7641#ifdef BCM_CNIC
7642 /* Enable Timer scan */
7643 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7644#endif
34f80b04
EG
7645 for_each_nondefault_queue(bp, i) {
7646 rc = bnx2x_setup_multi(bp, i);
7647 if (rc)
37b091ba
MC
7648#ifdef BCM_CNIC
7649 goto load_error4;
7650#else
2dfe0e1f 7651 goto load_error3;
37b091ba 7652#endif
34f80b04 7653 }
a2fbb9ea 7654
ca00392c 7655 if (CHIP_IS_E1(bp))
e665bfda 7656 bnx2x_set_eth_mac_addr_e1(bp, 1);
ca00392c 7657 else
e665bfda 7658 bnx2x_set_eth_mac_addr_e1h(bp, 1);
993ac7b5
MC
7659#ifdef BCM_CNIC
7660 /* Set iSCSI L2 MAC */
7661 mutex_lock(&bp->cnic_mutex);
7662 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7663 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7664 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7665 }
7666 mutex_unlock(&bp->cnic_mutex);
7667#endif
ca00392c 7668 }
34f80b04
EG
7669
7670 if (bp->port.pmf)
b5bf9068 7671 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7672
7673 /* Start fast path */
34f80b04
EG
7674 switch (load_mode) {
7675 case LOAD_NORMAL:
ca00392c
EG
7676 if (bp->state == BNX2X_STATE_OPEN) {
7677 /* Tx queue should be only reenabled */
7678 netif_tx_wake_all_queues(bp->dev);
7679 }
2dfe0e1f 7680 /* Initialize the receive filter. */
34f80b04
EG
7681 bnx2x_set_rx_mode(bp->dev);
7682 break;
7683
7684 case LOAD_OPEN:
555f6c78 7685 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7686 if (bp->state != BNX2X_STATE_OPEN)
7687 netif_tx_disable(bp->dev);
2dfe0e1f 7688 /* Initialize the receive filter. */
34f80b04 7689 bnx2x_set_rx_mode(bp->dev);
34f80b04 7690 break;
a2fbb9ea 7691
34f80b04 7692 case LOAD_DIAG:
2dfe0e1f 7693 /* Initialize the receive filter. */
a2fbb9ea 7694 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7695 bp->state = BNX2X_STATE_DIAG;
7696 break;
7697
7698 default:
7699 break;
a2fbb9ea
ET
7700 }
7701
34f80b04
EG
7702 if (!bp->port.pmf)
7703 bnx2x__link_status_update(bp);
7704
a2fbb9ea
ET
7705 /* start the timer */
7706 mod_timer(&bp->timer, jiffies + bp->current_interval);
7707
993ac7b5
MC
7708#ifdef BCM_CNIC
7709 bnx2x_setup_cnic_irq_info(bp);
7710 if (bp->state == BNX2X_STATE_OPEN)
7711 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7712#endif
34f80b04 7713
a2fbb9ea
ET
7714 return 0;
7715
37b091ba
MC
7716#ifdef BCM_CNIC
7717load_error4:
7718 /* Disable Timer scan */
7719 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7720#endif
2dfe0e1f
EG
7721load_error3:
7722 bnx2x_int_disable_sync(bp, 1);
7723 if (!BP_NOMCP(bp)) {
7724 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7725 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7726 }
7727 bp->port.pmf = 0;
7a9b2557
VZ
7728 /* Free SKBs, SGEs, TPA pool and driver internals */
7729 bnx2x_free_skbs(bp);
555f6c78 7730 for_each_rx_queue(bp, i)
3196a88a 7731 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7732load_error2:
d1014634
YG
7733 /* Release IRQs */
7734 bnx2x_free_irq(bp);
2dfe0e1f
EG
7735load_error1:
7736 bnx2x_napi_disable(bp);
555f6c78 7737 for_each_rx_queue(bp, i)
7cde1c8b 7738 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7739 bnx2x_free_mem(bp);
7740
34f80b04 7741 return rc;
a2fbb9ea
ET
7742}
7743
7744static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7745{
555f6c78 7746 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7747 int rc;
7748
c14423fe 7749 /* halt the connection */
555f6c78
EG
7750 fp->state = BNX2X_FP_STATE_HALTING;
7751 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7752
34f80b04 7753 /* Wait for completion */
a2fbb9ea 7754 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7755 &(fp->state), 1);
c14423fe 7756 if (rc) /* timeout */
a2fbb9ea
ET
7757 return rc;
7758
7759 /* delete cfc entry */
7760 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7761
34f80b04
EG
7762 /* Wait for completion */
7763 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7764 &(fp->state), 1);
34f80b04 7765 return rc;
a2fbb9ea
ET
7766}
7767
da5a662a 7768static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7769{
4781bfad 7770 __le16 dsb_sp_prod_idx;
c14423fe 7771 /* if the other port is handling traffic,
a2fbb9ea 7772 this can take a lot of time */
34f80b04
EG
7773 int cnt = 500;
7774 int rc;
a2fbb9ea
ET
7775
7776 might_sleep();
7777
7778 /* Send HALT ramrod */
7779 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7781
34f80b04
EG
7782 /* Wait for completion */
7783 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7784 &(bp->fp[0].state), 1);
7785 if (rc) /* timeout */
da5a662a 7786 return rc;
a2fbb9ea 7787
49d66772 7788 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7789
228241eb 7790 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7791 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7792
49d66772 7793 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7794 we are going to reset the chip anyway
7795 so there is not much to do if this times out
7796 */
34f80b04 7797 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7798 if (!cnt) {
7799 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7800 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7801 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7802#ifdef BNX2X_STOP_ON_ERROR
7803 bnx2x_panic();
7804#endif
36e552ab 7805 rc = -EBUSY;
34f80b04
EG
7806 break;
7807 }
7808 cnt--;
da5a662a 7809 msleep(1);
5650d9d4 7810 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7811 }
7812 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7813 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7814
7815 return rc;
a2fbb9ea
ET
7816}
7817
34f80b04
EG
7818static void bnx2x_reset_func(struct bnx2x *bp)
7819{
7820 int port = BP_PORT(bp);
7821 int func = BP_FUNC(bp);
7822 int base, i;
7823
7824 /* Configure IGU */
7825 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7826 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7827
37b091ba
MC
7828#ifdef BCM_CNIC
7829 /* Disable Timer scan */
7830 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7831 /*
7832 * Wait for at least 10ms and up to 2 second for the timers scan to
7833 * complete
7834 */
7835 for (i = 0; i < 200; i++) {
7836 msleep(10);
7837 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7838 break;
7839 }
7840#endif
34f80b04
EG
7841 /* Clear ILT */
7842 base = FUNC_ILT_BASE(func);
7843 for (i = base; i < base + ILT_PER_FUNC; i++)
7844 bnx2x_ilt_wr(bp, i, 0);
7845}
7846
7847static void bnx2x_reset_port(struct bnx2x *bp)
7848{
7849 int port = BP_PORT(bp);
7850 u32 val;
7851
7852 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7853
7854 /* Do not rcv packets to BRB */
7855 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7856 /* Do not direct rcv packets that are not for MCP to the BRB */
7857 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7858 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7859
7860 /* Configure AEU */
7861 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7862
7863 msleep(100);
7864 /* Check for BRB port occupancy */
7865 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7866 if (val)
7867 DP(NETIF_MSG_IFDOWN,
33471629 7868 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7869
7870 /* TODO: Close Doorbell port? */
7871}
7872
34f80b04
EG
7873static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7874{
7875 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7876 BP_FUNC(bp), reset_code);
7877
7878 switch (reset_code) {
7879 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7880 bnx2x_reset_port(bp);
7881 bnx2x_reset_func(bp);
7882 bnx2x_reset_common(bp);
7883 break;
7884
7885 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7886 bnx2x_reset_port(bp);
7887 bnx2x_reset_func(bp);
7888 break;
7889
7890 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7891 bnx2x_reset_func(bp);
7892 break;
49d66772 7893
34f80b04
EG
7894 default:
7895 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7896 break;
7897 }
7898}
7899
33471629 7900/* must be called with rtnl_lock */
34f80b04 7901static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7902{
da5a662a 7903 int port = BP_PORT(bp);
a2fbb9ea 7904 u32 reset_code = 0;
da5a662a 7905 int i, cnt, rc;
a2fbb9ea 7906
993ac7b5
MC
7907#ifdef BCM_CNIC
7908 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7909#endif
a2fbb9ea
ET
7910 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7911
ab6ad5a4 7912 /* Set "drop all" */
228241eb
ET
7913 bp->rx_mode = BNX2X_RX_MODE_NONE;
7914 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7915
ab6ad5a4 7916 /* Disable HW interrupts, NAPI and Tx */
f8ef6e44 7917 bnx2x_netif_stop(bp, 1);
e94d8af3 7918
34f80b04
EG
7919 del_timer_sync(&bp->timer);
7920 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7921 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7922 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7923
70b9986c
EG
7924 /* Release IRQs */
7925 bnx2x_free_irq(bp);
7926
555f6c78
EG
7927 /* Wait until tx fastpath tasks complete */
7928 for_each_tx_queue(bp, i) {
228241eb
ET
7929 struct bnx2x_fastpath *fp = &bp->fp[i];
7930
34f80b04 7931 cnt = 1000;
e8b5fc51 7932 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7933
7961f791 7934 bnx2x_tx_int(fp);
34f80b04
EG
7935 if (!cnt) {
7936 BNX2X_ERR("timeout waiting for queue[%d]\n",
7937 i);
7938#ifdef BNX2X_STOP_ON_ERROR
7939 bnx2x_panic();
7940 return -EBUSY;
7941#else
7942 break;
7943#endif
7944 }
7945 cnt--;
da5a662a 7946 msleep(1);
34f80b04 7947 }
228241eb 7948 }
da5a662a
VZ
7949 /* Give HW time to discard old tx messages */
7950 msleep(1);
a2fbb9ea 7951
3101c2bc
YG
7952 if (CHIP_IS_E1(bp)) {
7953 struct mac_configuration_cmd *config =
7954 bnx2x_sp(bp, mcast_config);
7955
e665bfda 7956 bnx2x_set_eth_mac_addr_e1(bp, 0);
3101c2bc 7957
8d9c5f34 7958 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7959 CAM_INVALIDATE(config->config_table[i]);
7960
8d9c5f34 7961 config->hdr.length = i;
3101c2bc
YG
7962 if (CHIP_REV_IS_SLOW(bp))
7963 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7964 else
7965 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7966 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7967 config->hdr.reserved1 = 0;
7968
e665bfda
MC
7969 bp->set_mac_pending++;
7970 smp_wmb();
7971
3101c2bc
YG
7972 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7973 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7974 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7975
7976 } else { /* E1H */
65abd74d
YG
7977 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7978
e665bfda 7979 bnx2x_set_eth_mac_addr_e1h(bp, 0);
3101c2bc
YG
7980
7981 for (i = 0; i < MC_HASH_SIZE; i++)
7982 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7983
7984 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc 7985 }
993ac7b5
MC
7986#ifdef BCM_CNIC
7987 /* Clear iSCSI L2 MAC */
7988 mutex_lock(&bp->cnic_mutex);
7989 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7990 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7991 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7992 }
7993 mutex_unlock(&bp->cnic_mutex);
7994#endif
3101c2bc 7995
65abd74d
YG
7996 if (unload_mode == UNLOAD_NORMAL)
7997 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7998
7d0446c2 7999 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8000 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8001
7d0446c2 8002 else if (bp->wol) {
65abd74d
YG
8003 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8004 u8 *mac_addr = bp->dev->dev_addr;
8005 u32 val;
8006 /* The mac address is written to entries 1-4 to
8007 preserve entry 0 which is used by the PMF */
8008 u8 entry = (BP_E1HVN(bp) + 1)*8;
8009
8010 val = (mac_addr[0] << 8) | mac_addr[1];
8011 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8012
8013 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8014 (mac_addr[4] << 8) | mac_addr[5];
8015 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8016
8017 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8018
8019 } else
8020 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8021
34f80b04
EG
8022 /* Close multi and leading connections
8023 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
8024 for_each_nondefault_queue(bp, i)
8025 if (bnx2x_stop_multi(bp, i))
228241eb 8026 goto unload_error;
a2fbb9ea 8027
da5a662a
VZ
8028 rc = bnx2x_stop_leading(bp);
8029 if (rc) {
34f80b04 8030 BNX2X_ERR("Stop leading failed!\n");
da5a662a 8031#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8032 return -EBUSY;
da5a662a
VZ
8033#else
8034 goto unload_error;
34f80b04 8035#endif
228241eb
ET
8036 }
8037
8038unload_error:
34f80b04 8039 if (!BP_NOMCP(bp))
228241eb 8040 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 8041 else {
f5372251 8042 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
8043 load_count[0], load_count[1], load_count[2]);
8044 load_count[0]--;
da5a662a 8045 load_count[1 + port]--;
f5372251 8046 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
8047 load_count[0], load_count[1], load_count[2]);
8048 if (load_count[0] == 0)
8049 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 8050 else if (load_count[1 + port] == 0)
34f80b04
EG
8051 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8052 else
8053 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8054 }
a2fbb9ea 8055
34f80b04
EG
8056 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8057 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8058 bnx2x__link_reset(bp);
a2fbb9ea
ET
8059
8060 /* Reset the chip */
228241eb 8061 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
8062
8063 /* Report UNLOAD_DONE to MCP */
34f80b04 8064 if (!BP_NOMCP(bp))
a2fbb9ea 8065 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 8066
9a035440 8067 bp->port.pmf = 0;
a2fbb9ea 8068
7a9b2557 8069 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 8070 bnx2x_free_skbs(bp);
555f6c78 8071 for_each_rx_queue(bp, i)
3196a88a 8072 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 8073 for_each_rx_queue(bp, i)
7cde1c8b 8074 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
8075 bnx2x_free_mem(bp);
8076
8077 bp->state = BNX2X_STATE_CLOSED;
228241eb 8078
a2fbb9ea
ET
8079 netif_carrier_off(bp->dev);
8080
8081 return 0;
8082}
8083
34f80b04
EG
8084static void bnx2x_reset_task(struct work_struct *work)
8085{
8086 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8087
8088#ifdef BNX2X_STOP_ON_ERROR
8089 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8090 " so reset not done to allow debug dump,\n"
ad361c98 8091 " you will need to reboot when done\n");
34f80b04
EG
8092 return;
8093#endif
8094
8095 rtnl_lock();
8096
8097 if (!netif_running(bp->dev))
8098 goto reset_task_exit;
8099
8100 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8101 bnx2x_nic_load(bp, LOAD_NORMAL);
8102
8103reset_task_exit:
8104 rtnl_unlock();
8105}
8106
a2fbb9ea
ET
8107/* end of nic load/unload */
8108
8109/* ethtool_ops */
8110
8111/*
8112 * Init service functions
8113 */
8114
f1ef27ef
EG
8115static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8116{
8117 switch (func) {
8118 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8119 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8120 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8121 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8122 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8123 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8124 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8125 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8126 default:
8127 BNX2X_ERR("Unsupported function index: %d\n", func);
8128 return (u32)(-1);
8129 }
8130}
8131
8132static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8133{
8134 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8135
8136 /* Flush all outstanding writes */
8137 mmiowb();
8138
8139 /* Pretend to be function 0 */
8140 REG_WR(bp, reg, 0);
8141 /* Flush the GRC transaction (in the chip) */
8142 new_val = REG_RD(bp, reg);
8143 if (new_val != 0) {
8144 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8145 new_val);
8146 BUG();
8147 }
8148
8149 /* From now we are in the "like-E1" mode */
8150 bnx2x_int_disable(bp);
8151
8152 /* Flush all outstanding writes */
8153 mmiowb();
8154
8155 /* Restore the original funtion settings */
8156 REG_WR(bp, reg, orig_func);
8157 new_val = REG_RD(bp, reg);
8158 if (new_val != orig_func) {
8159 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8160 orig_func, new_val);
8161 BUG();
8162 }
8163}
8164
8165static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8166{
8167 if (CHIP_IS_E1H(bp))
8168 bnx2x_undi_int_disable_e1h(bp, func);
8169 else
8170 bnx2x_int_disable(bp);
8171}
8172
34f80b04
EG
8173static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8174{
8175 u32 val;
8176
8177 /* Check if there is any driver already loaded */
8178 val = REG_RD(bp, MISC_REG_UNPREPARED);
8179 if (val == 0x1) {
8180 /* Check if it is the UNDI driver
8181 * UNDI driver initializes CID offset for normal bell to 0x7
8182 */
4a37fb66 8183 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8184 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8185 if (val == 0x7) {
8186 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8187 /* save our func */
34f80b04 8188 int func = BP_FUNC(bp);
da5a662a
VZ
8189 u32 swap_en;
8190 u32 swap_val;
34f80b04 8191
b4661739
EG
8192 /* clear the UNDI indication */
8193 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8194
34f80b04
EG
8195 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8196
8197 /* try unload UNDI on port 0 */
8198 bp->func = 0;
da5a662a
VZ
8199 bp->fw_seq =
8200 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8201 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8202 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8203
8204 /* if UNDI is loaded on the other port */
8205 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8206
da5a662a
VZ
8207 /* send "DONE" for previous unload */
8208 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8209
8210 /* unload UNDI on port 1 */
34f80b04 8211 bp->func = 1;
da5a662a
VZ
8212 bp->fw_seq =
8213 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8214 DRV_MSG_SEQ_NUMBER_MASK);
8215 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8216
8217 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8218 }
8219
b4661739
EG
8220 /* now it's safe to release the lock */
8221 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8222
f1ef27ef 8223 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8224
8225 /* close input traffic and wait for it */
8226 /* Do not rcv packets to BRB */
8227 REG_WR(bp,
8228 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8229 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8230 /* Do not direct rcv packets that are not for MCP to
8231 * the BRB */
8232 REG_WR(bp,
8233 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8234 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8235 /* clear AEU */
8236 REG_WR(bp,
8237 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8238 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8239 msleep(10);
8240
8241 /* save NIG port swap info */
8242 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8243 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8244 /* reset device */
8245 REG_WR(bp,
8246 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8247 0xd3ffffff);
34f80b04
EG
8248 REG_WR(bp,
8249 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8250 0x1403);
da5a662a
VZ
8251 /* take the NIG out of reset and restore swap values */
8252 REG_WR(bp,
8253 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8254 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8255 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8256 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8257
8258 /* send unload done to the MCP */
8259 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8260
8261 /* restore our func and fw_seq */
8262 bp->func = func;
8263 bp->fw_seq =
8264 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8265 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8266
8267 } else
8268 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8269 }
8270}
8271
8272static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8273{
8274 u32 val, val2, val3, val4, id;
72ce58c3 8275 u16 pmc;
34f80b04
EG
8276
8277 /* Get the chip revision id and number. */
8278 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8279 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8280 id = ((val & 0xffff) << 16);
8281 val = REG_RD(bp, MISC_REG_CHIP_REV);
8282 id |= ((val & 0xf) << 12);
8283 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8284 id |= ((val & 0xff) << 4);
5a40e08e 8285 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8286 id |= (val & 0xf);
8287 bp->common.chip_id = id;
8288 bp->link_params.chip_id = bp->common.chip_id;
8289 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8290
1c06328c
EG
8291 val = (REG_RD(bp, 0x2874) & 0x55);
8292 if ((bp->common.chip_id & 0x1) ||
8293 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8294 bp->flags |= ONE_PORT_FLAG;
8295 BNX2X_DEV_INFO("single port device\n");
8296 }
8297
34f80b04
EG
8298 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8299 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8300 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8301 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8302 bp->common.flash_size, bp->common.flash_size);
8303
8304 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8305 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8306 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8307 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8308 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8309
8310 if (!bp->common.shmem_base ||
8311 (bp->common.shmem_base < 0xA0000) ||
8312 (bp->common.shmem_base >= 0xC0000)) {
8313 BNX2X_DEV_INFO("MCP not active\n");
8314 bp->flags |= NO_MCP_FLAG;
8315 return;
8316 }
8317
8318 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8319 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8320 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8321 BNX2X_ERR("BAD MCP validity signature\n");
8322
8323 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8324 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8325
8326 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8327 SHARED_HW_CFG_LED_MODE_MASK) >>
8328 SHARED_HW_CFG_LED_MODE_SHIFT);
8329
c2c8b03e
EG
8330 bp->link_params.feature_config_flags = 0;
8331 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8332 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8333 bp->link_params.feature_config_flags |=
8334 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8335 else
8336 bp->link_params.feature_config_flags &=
8337 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8338
34f80b04
EG
8339 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8340 bp->common.bc_ver = val;
8341 BNX2X_DEV_INFO("bc_ver %X\n", val);
8342 if (val < BNX2X_BC_VER) {
8343 /* for now only warn
8344 * later we might need to enforce this */
8345 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8346 " please upgrade BC\n", BNX2X_BC_VER, val);
8347 }
4d295db0
EG
8348 bp->link_params.feature_config_flags |=
8349 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8350 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8351
8352 if (BP_E1HVN(bp) == 0) {
8353 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8354 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8355 } else {
8356 /* no WOL capability for E1HVN != 0 */
8357 bp->flags |= NO_WOL_FLAG;
8358 }
8359 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8360 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8361
8362 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8363 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8364 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8365 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8366
8367 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8368 val, val2, val3, val4);
8369}
8370
8371static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8372 u32 switch_cfg)
a2fbb9ea 8373{
34f80b04 8374 int port = BP_PORT(bp);
a2fbb9ea
ET
8375 u32 ext_phy_type;
8376
a2fbb9ea
ET
8377 switch (switch_cfg) {
8378 case SWITCH_CFG_1G:
8379 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8380
c18487ee
YR
8381 ext_phy_type =
8382 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8383 switch (ext_phy_type) {
8384 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8385 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8386 ext_phy_type);
8387
34f80b04
EG
8388 bp->port.supported |= (SUPPORTED_10baseT_Half |
8389 SUPPORTED_10baseT_Full |
8390 SUPPORTED_100baseT_Half |
8391 SUPPORTED_100baseT_Full |
8392 SUPPORTED_1000baseT_Full |
8393 SUPPORTED_2500baseX_Full |
8394 SUPPORTED_TP |
8395 SUPPORTED_FIBRE |
8396 SUPPORTED_Autoneg |
8397 SUPPORTED_Pause |
8398 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8399 break;
8400
8401 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8402 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8403 ext_phy_type);
8404
34f80b04
EG
8405 bp->port.supported |= (SUPPORTED_10baseT_Half |
8406 SUPPORTED_10baseT_Full |
8407 SUPPORTED_100baseT_Half |
8408 SUPPORTED_100baseT_Full |
8409 SUPPORTED_1000baseT_Full |
8410 SUPPORTED_TP |
8411 SUPPORTED_FIBRE |
8412 SUPPORTED_Autoneg |
8413 SUPPORTED_Pause |
8414 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8415 break;
8416
8417 default:
8418 BNX2X_ERR("NVRAM config error. "
8419 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8420 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8421 return;
8422 }
8423
34f80b04
EG
8424 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8425 port*0x10);
8426 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8427 break;
8428
8429 case SWITCH_CFG_10G:
8430 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8431
c18487ee
YR
8432 ext_phy_type =
8433 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8434 switch (ext_phy_type) {
8435 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8436 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8437 ext_phy_type);
8438
34f80b04
EG
8439 bp->port.supported |= (SUPPORTED_10baseT_Half |
8440 SUPPORTED_10baseT_Full |
8441 SUPPORTED_100baseT_Half |
8442 SUPPORTED_100baseT_Full |
8443 SUPPORTED_1000baseT_Full |
8444 SUPPORTED_2500baseX_Full |
8445 SUPPORTED_10000baseT_Full |
8446 SUPPORTED_TP |
8447 SUPPORTED_FIBRE |
8448 SUPPORTED_Autoneg |
8449 SUPPORTED_Pause |
8450 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8451 break;
8452
589abe3a
EG
8453 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8454 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8455 ext_phy_type);
f1410647 8456
34f80b04 8457 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8458 SUPPORTED_1000baseT_Full |
34f80b04 8459 SUPPORTED_FIBRE |
589abe3a 8460 SUPPORTED_Autoneg |
34f80b04
EG
8461 SUPPORTED_Pause |
8462 SUPPORTED_Asym_Pause);
f1410647
ET
8463 break;
8464
589abe3a
EG
8465 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8466 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8467 ext_phy_type);
8468
34f80b04 8469 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8470 SUPPORTED_2500baseX_Full |
34f80b04 8471 SUPPORTED_1000baseT_Full |
589abe3a
EG
8472 SUPPORTED_FIBRE |
8473 SUPPORTED_Autoneg |
8474 SUPPORTED_Pause |
8475 SUPPORTED_Asym_Pause);
8476 break;
8477
8478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8479 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8480 ext_phy_type);
8481
8482 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8483 SUPPORTED_FIBRE |
8484 SUPPORTED_Pause |
8485 SUPPORTED_Asym_Pause);
f1410647
ET
8486 break;
8487
589abe3a
EG
8488 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8489 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8490 ext_phy_type);
8491
34f80b04
EG
8492 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8493 SUPPORTED_1000baseT_Full |
8494 SUPPORTED_FIBRE |
34f80b04
EG
8495 SUPPORTED_Pause |
8496 SUPPORTED_Asym_Pause);
f1410647
ET
8497 break;
8498
589abe3a
EG
8499 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8500 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8501 ext_phy_type);
8502
34f80b04 8503 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8504 SUPPORTED_1000baseT_Full |
34f80b04 8505 SUPPORTED_Autoneg |
589abe3a 8506 SUPPORTED_FIBRE |
34f80b04
EG
8507 SUPPORTED_Pause |
8508 SUPPORTED_Asym_Pause);
c18487ee
YR
8509 break;
8510
4d295db0
EG
8511 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8512 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8513 ext_phy_type);
8514
8515 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8516 SUPPORTED_1000baseT_Full |
8517 SUPPORTED_Autoneg |
8518 SUPPORTED_FIBRE |
8519 SUPPORTED_Pause |
8520 SUPPORTED_Asym_Pause);
8521 break;
8522
f1410647
ET
8523 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8524 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8525 ext_phy_type);
8526
34f80b04
EG
8527 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8528 SUPPORTED_TP |
8529 SUPPORTED_Autoneg |
8530 SUPPORTED_Pause |
8531 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8532 break;
8533
28577185
EG
8534 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8535 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8536 ext_phy_type);
8537
8538 bp->port.supported |= (SUPPORTED_10baseT_Half |
8539 SUPPORTED_10baseT_Full |
8540 SUPPORTED_100baseT_Half |
8541 SUPPORTED_100baseT_Full |
8542 SUPPORTED_1000baseT_Full |
8543 SUPPORTED_10000baseT_Full |
8544 SUPPORTED_TP |
8545 SUPPORTED_Autoneg |
8546 SUPPORTED_Pause |
8547 SUPPORTED_Asym_Pause);
8548 break;
8549
c18487ee
YR
8550 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8551 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8552 bp->link_params.ext_phy_config);
8553 break;
8554
a2fbb9ea
ET
8555 default:
8556 BNX2X_ERR("NVRAM config error. "
8557 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8558 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8559 return;
8560 }
8561
34f80b04
EG
8562 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8563 port*0x18);
8564 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8565
a2fbb9ea
ET
8566 break;
8567
8568 default:
8569 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8570 bp->port.link_config);
a2fbb9ea
ET
8571 return;
8572 }
34f80b04 8573 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8574
8575 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8576 if (!(bp->link_params.speed_cap_mask &
8577 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8578 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8579
c18487ee
YR
8580 if (!(bp->link_params.speed_cap_mask &
8581 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8582 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8583
c18487ee
YR
8584 if (!(bp->link_params.speed_cap_mask &
8585 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8586 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8587
c18487ee
YR
8588 if (!(bp->link_params.speed_cap_mask &
8589 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8590 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8591
c18487ee
YR
8592 if (!(bp->link_params.speed_cap_mask &
8593 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8594 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8595 SUPPORTED_1000baseT_Full);
a2fbb9ea 8596
c18487ee
YR
8597 if (!(bp->link_params.speed_cap_mask &
8598 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8599 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8600
c18487ee
YR
8601 if (!(bp->link_params.speed_cap_mask &
8602 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8603 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8604
34f80b04 8605 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8606}
8607
34f80b04 8608static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8609{
c18487ee 8610 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8611
34f80b04 8612 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8613 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8614 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8615 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8616 bp->port.advertising = bp->port.supported;
a2fbb9ea 8617 } else {
c18487ee
YR
8618 u32 ext_phy_type =
8619 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8620
8621 if ((ext_phy_type ==
8622 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8623 (ext_phy_type ==
8624 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8625 /* force 10G, no AN */
c18487ee 8626 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8627 bp->port.advertising =
a2fbb9ea
ET
8628 (ADVERTISED_10000baseT_Full |
8629 ADVERTISED_FIBRE);
8630 break;
8631 }
8632 BNX2X_ERR("NVRAM config error. "
8633 "Invalid link_config 0x%x"
8634 " Autoneg not supported\n",
34f80b04 8635 bp->port.link_config);
a2fbb9ea
ET
8636 return;
8637 }
8638 break;
8639
8640 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8641 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8642 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8643 bp->port.advertising = (ADVERTISED_10baseT_Full |
8644 ADVERTISED_TP);
a2fbb9ea
ET
8645 } else {
8646 BNX2X_ERR("NVRAM config error. "
8647 "Invalid link_config 0x%x"
8648 " speed_cap_mask 0x%x\n",
34f80b04 8649 bp->port.link_config,
c18487ee 8650 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8651 return;
8652 }
8653 break;
8654
8655 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8656 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8657 bp->link_params.req_line_speed = SPEED_10;
8658 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8659 bp->port.advertising = (ADVERTISED_10baseT_Half |
8660 ADVERTISED_TP);
a2fbb9ea
ET
8661 } else {
8662 BNX2X_ERR("NVRAM config error. "
8663 "Invalid link_config 0x%x"
8664 " speed_cap_mask 0x%x\n",
34f80b04 8665 bp->port.link_config,
c18487ee 8666 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8667 return;
8668 }
8669 break;
8670
8671 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8672 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8673 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8674 bp->port.advertising = (ADVERTISED_100baseT_Full |
8675 ADVERTISED_TP);
a2fbb9ea
ET
8676 } else {
8677 BNX2X_ERR("NVRAM config error. "
8678 "Invalid link_config 0x%x"
8679 " speed_cap_mask 0x%x\n",
34f80b04 8680 bp->port.link_config,
c18487ee 8681 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8682 return;
8683 }
8684 break;
8685
8686 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8687 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8688 bp->link_params.req_line_speed = SPEED_100;
8689 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8690 bp->port.advertising = (ADVERTISED_100baseT_Half |
8691 ADVERTISED_TP);
a2fbb9ea
ET
8692 } else {
8693 BNX2X_ERR("NVRAM config error. "
8694 "Invalid link_config 0x%x"
8695 " speed_cap_mask 0x%x\n",
34f80b04 8696 bp->port.link_config,
c18487ee 8697 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8698 return;
8699 }
8700 break;
8701
8702 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8703 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8704 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8705 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8706 ADVERTISED_TP);
a2fbb9ea
ET
8707 } else {
8708 BNX2X_ERR("NVRAM config error. "
8709 "Invalid link_config 0x%x"
8710 " speed_cap_mask 0x%x\n",
34f80b04 8711 bp->port.link_config,
c18487ee 8712 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8713 return;
8714 }
8715 break;
8716
8717 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8718 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8719 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8720 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8721 ADVERTISED_TP);
a2fbb9ea
ET
8722 } else {
8723 BNX2X_ERR("NVRAM config error. "
8724 "Invalid link_config 0x%x"
8725 " speed_cap_mask 0x%x\n",
34f80b04 8726 bp->port.link_config,
c18487ee 8727 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8728 return;
8729 }
8730 break;
8731
8732 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8733 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8734 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8735 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8736 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8737 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8738 ADVERTISED_FIBRE);
a2fbb9ea
ET
8739 } else {
8740 BNX2X_ERR("NVRAM config error. "
8741 "Invalid link_config 0x%x"
8742 " speed_cap_mask 0x%x\n",
34f80b04 8743 bp->port.link_config,
c18487ee 8744 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8745 return;
8746 }
8747 break;
8748
8749 default:
8750 BNX2X_ERR("NVRAM config error. "
8751 "BAD link speed link_config 0x%x\n",
34f80b04 8752 bp->port.link_config);
c18487ee 8753 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8754 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8755 break;
8756 }
a2fbb9ea 8757
34f80b04
EG
8758 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8759 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8760 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8761 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8762 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8763
c18487ee 8764 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8765 " advertising 0x%x\n",
c18487ee
YR
8766 bp->link_params.req_line_speed,
8767 bp->link_params.req_duplex,
34f80b04 8768 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8769}
8770
e665bfda
MC
8771static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8772{
8773 mac_hi = cpu_to_be16(mac_hi);
8774 mac_lo = cpu_to_be32(mac_lo);
8775 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8776 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8777}
8778
34f80b04 8779static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8780{
34f80b04
EG
8781 int port = BP_PORT(bp);
8782 u32 val, val2;
589abe3a 8783 u32 config;
c2c8b03e 8784 u16 i;
01cd4528 8785 u32 ext_phy_type;
a2fbb9ea 8786
c18487ee 8787 bp->link_params.bp = bp;
34f80b04 8788 bp->link_params.port = port;
c18487ee 8789
c18487ee 8790 bp->link_params.lane_config =
a2fbb9ea 8791 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8792 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8793 SHMEM_RD(bp,
8794 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8795 /* BCM8727_NOC => BCM8727 no over current */
8796 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8797 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8798 bp->link_params.ext_phy_config &=
8799 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8800 bp->link_params.ext_phy_config |=
8801 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8802 bp->link_params.feature_config_flags |=
8803 FEATURE_CONFIG_BCM8727_NOC;
8804 }
8805
c18487ee 8806 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8807 SHMEM_RD(bp,
8808 dev_info.port_hw_config[port].speed_capability_mask);
8809
34f80b04 8810 bp->port.link_config =
a2fbb9ea
ET
8811 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8812
c2c8b03e
EG
8813 /* Get the 4 lanes xgxs config rx and tx */
8814 for (i = 0; i < 2; i++) {
8815 val = SHMEM_RD(bp,
8816 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8817 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8818 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8819
8820 val = SHMEM_RD(bp,
8821 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8822 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8823 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8824 }
8825
3ce2c3f9
EG
8826 /* If the device is capable of WoL, set the default state according
8827 * to the HW
8828 */
4d295db0 8829 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8830 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8831 (config & PORT_FEATURE_WOL_ENABLED));
8832
c2c8b03e
EG
8833 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8834 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8835 bp->link_params.lane_config,
8836 bp->link_params.ext_phy_config,
34f80b04 8837 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8838
4d295db0
EG
8839 bp->link_params.switch_cfg |= (bp->port.link_config &
8840 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8841 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8842
8843 bnx2x_link_settings_requested(bp);
8844
01cd4528
EG
8845 /*
8846 * If connected directly, work with the internal PHY, otherwise, work
8847 * with the external PHY
8848 */
8849 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8850 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8851 bp->mdio.prtad = bp->link_params.phy_addr;
8852
8853 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8854 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8855 bp->mdio.prtad =
659bc5c4 8856 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8857
a2fbb9ea
ET
8858 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8859 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8860 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8861 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8862 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8863
8864#ifdef BCM_CNIC
8865 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8866 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8867 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8868#endif
34f80b04
EG
8869}
8870
8871static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8872{
8873 int func = BP_FUNC(bp);
8874 u32 val, val2;
8875 int rc = 0;
a2fbb9ea 8876
34f80b04 8877 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8878
34f80b04
EG
8879 bp->e1hov = 0;
8880 bp->e1hmf = 0;
8881 if (CHIP_IS_E1H(bp)) {
8882 bp->mf_config =
8883 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8884
2691d51d 8885 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8886 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8887 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8888 bp->e1hmf = 1;
2691d51d
EG
8889 BNX2X_DEV_INFO("%s function mode\n",
8890 IS_E1HMF(bp) ? "multi" : "single");
8891
8892 if (IS_E1HMF(bp)) {
8893 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8894 e1hov_tag) &
8895 FUNC_MF_CFG_E1HOV_TAG_MASK);
8896 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8897 bp->e1hov = val;
8898 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8899 "(0x%04x)\n",
8900 func, bp->e1hov, bp->e1hov);
8901 } else {
34f80b04
EG
8902 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8903 " aborting\n", func);
8904 rc = -EPERM;
8905 }
2691d51d
EG
8906 } else {
8907 if (BP_E1HVN(bp)) {
8908 BNX2X_ERR("!!! VN %d in single function mode,"
8909 " aborting\n", BP_E1HVN(bp));
8910 rc = -EPERM;
8911 }
34f80b04
EG
8912 }
8913 }
a2fbb9ea 8914
34f80b04
EG
8915 if (!BP_NOMCP(bp)) {
8916 bnx2x_get_port_hwinfo(bp);
8917
8918 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8919 DRV_MSG_SEQ_NUMBER_MASK);
8920 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8921 }
8922
8923 if (IS_E1HMF(bp)) {
8924 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8925 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8926 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8927 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8928 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8929 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8930 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8931 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8932 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8933 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8934 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8935 ETH_ALEN);
8936 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8937 ETH_ALEN);
a2fbb9ea 8938 }
34f80b04
EG
8939
8940 return rc;
a2fbb9ea
ET
8941 }
8942
34f80b04
EG
8943 if (BP_NOMCP(bp)) {
8944 /* only supposed to happen on emulation/FPGA */
33471629 8945 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8946 random_ether_addr(bp->dev->dev_addr);
8947 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8948 }
a2fbb9ea 8949
34f80b04
EG
8950 return rc;
8951}
8952
8953static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8954{
8955 int func = BP_FUNC(bp);
87942b46 8956 int timer_interval;
34f80b04
EG
8957 int rc;
8958
da5a662a
VZ
8959 /* Disable interrupt handling until HW is initialized */
8960 atomic_set(&bp->intr_sem, 1);
e1510706 8961 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8962
34f80b04 8963 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8964 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
8965#ifdef BCM_CNIC
8966 mutex_init(&bp->cnic_mutex);
8967#endif
a2fbb9ea 8968
1cf167f2 8969 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8970 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8971
8972 rc = bnx2x_get_hwinfo(bp);
8973
8974 /* need to reset chip if undi was active */
8975 if (!BP_NOMCP(bp))
8976 bnx2x_undi_unload(bp);
8977
8978 if (CHIP_REV_IS_FPGA(bp))
8979 printk(KERN_ERR PFX "FPGA detected\n");
8980
8981 if (BP_NOMCP(bp) && (func == 0))
8982 printk(KERN_ERR PFX
8983 "MCP disabled, must load devices in order!\n");
8984
555f6c78 8985 /* Set multi queue mode */
8badd27a
EG
8986 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8987 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8988 printk(KERN_ERR PFX
8badd27a 8989 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8990 multi_mode = ETH_RSS_MODE_DISABLED;
8991 }
8992 bp->multi_mode = multi_mode;
8993
8994
7a9b2557
VZ
8995 /* Set TPA flags */
8996 if (disable_tpa) {
8997 bp->flags &= ~TPA_ENABLE_FLAG;
8998 bp->dev->features &= ~NETIF_F_LRO;
8999 } else {
9000 bp->flags |= TPA_ENABLE_FLAG;
9001 bp->dev->features |= NETIF_F_LRO;
9002 }
9003
a18f5128
EG
9004 if (CHIP_IS_E1(bp))
9005 bp->dropless_fc = 0;
9006 else
9007 bp->dropless_fc = dropless_fc;
9008
8d5726c4 9009 bp->mrrs = mrrs;
7a9b2557 9010
34f80b04
EG
9011 bp->tx_ring_size = MAX_TX_AVAIL;
9012 bp->rx_ring_size = MAX_RX_AVAIL;
9013
9014 bp->rx_csum = 1;
34f80b04
EG
9015
9016 bp->tx_ticks = 50;
9017 bp->rx_ticks = 25;
9018
87942b46
EG
9019 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9020 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
9021
9022 init_timer(&bp->timer);
9023 bp->timer.expires = jiffies + bp->current_interval;
9024 bp->timer.data = (unsigned long) bp;
9025 bp->timer.function = bnx2x_timer;
9026
9027 return rc;
a2fbb9ea
ET
9028}
9029
9030/*
9031 * ethtool service functions
9032 */
9033
9034/* All ethtool functions called with rtnl_lock */
9035
9036static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9037{
9038 struct bnx2x *bp = netdev_priv(dev);
9039
34f80b04
EG
9040 cmd->supported = bp->port.supported;
9041 cmd->advertising = bp->port.advertising;
a2fbb9ea 9042
f34d28ea
EG
9043 if ((bp->state == BNX2X_STATE_OPEN) &&
9044 !(bp->flags & MF_FUNC_DIS) &&
9045 (bp->link_vars.link_up)) {
c18487ee
YR
9046 cmd->speed = bp->link_vars.line_speed;
9047 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
9048 if (IS_E1HMF(bp)) {
9049 u16 vn_max_rate;
34f80b04 9050
b015e3d1
EG
9051 vn_max_rate =
9052 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 9053 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
9054 if (vn_max_rate < cmd->speed)
9055 cmd->speed = vn_max_rate;
9056 }
9057 } else {
9058 cmd->speed = -1;
9059 cmd->duplex = -1;
34f80b04 9060 }
a2fbb9ea 9061
c18487ee
YR
9062 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9063 u32 ext_phy_type =
9064 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9065
9066 switch (ext_phy_type) {
9067 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9068 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9071 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9072 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9073 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9074 cmd->port = PORT_FIBRE;
9075 break;
9076
9077 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9078 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9079 cmd->port = PORT_TP;
9080 break;
9081
c18487ee
YR
9082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9083 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9084 bp->link_params.ext_phy_config);
9085 break;
9086
f1410647
ET
9087 default:
9088 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
9089 bp->link_params.ext_phy_config);
9090 break;
f1410647
ET
9091 }
9092 } else
a2fbb9ea 9093 cmd->port = PORT_TP;
a2fbb9ea 9094
01cd4528 9095 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
9096 cmd->transceiver = XCVR_INTERNAL;
9097
c18487ee 9098 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9099 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9100 else
a2fbb9ea 9101 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
9102
9103 cmd->maxtxpkt = 0;
9104 cmd->maxrxpkt = 0;
9105
9106 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9107 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9108 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9109 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9110 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9111 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9112 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9113
9114 return 0;
9115}
9116
9117static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9118{
9119 struct bnx2x *bp = netdev_priv(dev);
9120 u32 advertising;
9121
34f80b04
EG
9122 if (IS_E1HMF(bp))
9123 return 0;
9124
a2fbb9ea
ET
9125 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9126 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9127 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9128 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9129 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9130 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9131 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9132
a2fbb9ea 9133 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
9134 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9135 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9136 return -EINVAL;
f1410647 9137 }
a2fbb9ea
ET
9138
9139 /* advertise the requested speed and duplex if supported */
34f80b04 9140 cmd->advertising &= bp->port.supported;
a2fbb9ea 9141
c18487ee
YR
9142 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9143 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
9144 bp->port.advertising |= (ADVERTISED_Autoneg |
9145 cmd->advertising);
a2fbb9ea
ET
9146
9147 } else { /* forced speed */
9148 /* advertise the requested speed and duplex if supported */
9149 switch (cmd->speed) {
9150 case SPEED_10:
9151 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9152 if (!(bp->port.supported &
f1410647
ET
9153 SUPPORTED_10baseT_Full)) {
9154 DP(NETIF_MSG_LINK,
9155 "10M full not supported\n");
a2fbb9ea 9156 return -EINVAL;
f1410647 9157 }
a2fbb9ea
ET
9158
9159 advertising = (ADVERTISED_10baseT_Full |
9160 ADVERTISED_TP);
9161 } else {
34f80b04 9162 if (!(bp->port.supported &
f1410647
ET
9163 SUPPORTED_10baseT_Half)) {
9164 DP(NETIF_MSG_LINK,
9165 "10M half not supported\n");
a2fbb9ea 9166 return -EINVAL;
f1410647 9167 }
a2fbb9ea
ET
9168
9169 advertising = (ADVERTISED_10baseT_Half |
9170 ADVERTISED_TP);
9171 }
9172 break;
9173
9174 case SPEED_100:
9175 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9176 if (!(bp->port.supported &
f1410647
ET
9177 SUPPORTED_100baseT_Full)) {
9178 DP(NETIF_MSG_LINK,
9179 "100M full not supported\n");
a2fbb9ea 9180 return -EINVAL;
f1410647 9181 }
a2fbb9ea
ET
9182
9183 advertising = (ADVERTISED_100baseT_Full |
9184 ADVERTISED_TP);
9185 } else {
34f80b04 9186 if (!(bp->port.supported &
f1410647
ET
9187 SUPPORTED_100baseT_Half)) {
9188 DP(NETIF_MSG_LINK,
9189 "100M half not supported\n");
a2fbb9ea 9190 return -EINVAL;
f1410647 9191 }
a2fbb9ea
ET
9192
9193 advertising = (ADVERTISED_100baseT_Half |
9194 ADVERTISED_TP);
9195 }
9196 break;
9197
9198 case SPEED_1000:
f1410647
ET
9199 if (cmd->duplex != DUPLEX_FULL) {
9200 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9201 return -EINVAL;
f1410647 9202 }
a2fbb9ea 9203
34f80b04 9204 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9205 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9206 return -EINVAL;
f1410647 9207 }
a2fbb9ea
ET
9208
9209 advertising = (ADVERTISED_1000baseT_Full |
9210 ADVERTISED_TP);
9211 break;
9212
9213 case SPEED_2500:
f1410647
ET
9214 if (cmd->duplex != DUPLEX_FULL) {
9215 DP(NETIF_MSG_LINK,
9216 "2.5G half not supported\n");
a2fbb9ea 9217 return -EINVAL;
f1410647 9218 }
a2fbb9ea 9219
34f80b04 9220 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
9221 DP(NETIF_MSG_LINK,
9222 "2.5G full not supported\n");
a2fbb9ea 9223 return -EINVAL;
f1410647 9224 }
a2fbb9ea 9225
f1410647 9226 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
9227 ADVERTISED_TP);
9228 break;
9229
9230 case SPEED_10000:
f1410647
ET
9231 if (cmd->duplex != DUPLEX_FULL) {
9232 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9233 return -EINVAL;
f1410647 9234 }
a2fbb9ea 9235
34f80b04 9236 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9237 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9238 return -EINVAL;
f1410647 9239 }
a2fbb9ea
ET
9240
9241 advertising = (ADVERTISED_10000baseT_Full |
9242 ADVERTISED_FIBRE);
9243 break;
9244
9245 default:
f1410647 9246 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9247 return -EINVAL;
9248 }
9249
c18487ee
YR
9250 bp->link_params.req_line_speed = cmd->speed;
9251 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9252 bp->port.advertising = advertising;
a2fbb9ea
ET
9253 }
9254
c18487ee 9255 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9256 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9257 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9258 bp->port.advertising);
a2fbb9ea 9259
34f80b04 9260 if (netif_running(dev)) {
bb2a0f7a 9261 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9262 bnx2x_link_set(bp);
9263 }
a2fbb9ea
ET
9264
9265 return 0;
9266}
9267
0a64ea57
EG
9268#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9269#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9270
9271static int bnx2x_get_regs_len(struct net_device *dev)
9272{
0a64ea57 9273 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9274 int regdump_len = 0;
0a64ea57
EG
9275 int i;
9276
0a64ea57
EG
9277 if (CHIP_IS_E1(bp)) {
9278 for (i = 0; i < REGS_COUNT; i++)
9279 if (IS_E1_ONLINE(reg_addrs[i].info))
9280 regdump_len += reg_addrs[i].size;
9281
9282 for (i = 0; i < WREGS_COUNT_E1; i++)
9283 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9284 regdump_len += wreg_addrs_e1[i].size *
9285 (1 + wreg_addrs_e1[i].read_regs_count);
9286
9287 } else { /* E1H */
9288 for (i = 0; i < REGS_COUNT; i++)
9289 if (IS_E1H_ONLINE(reg_addrs[i].info))
9290 regdump_len += reg_addrs[i].size;
9291
9292 for (i = 0; i < WREGS_COUNT_E1H; i++)
9293 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9294 regdump_len += wreg_addrs_e1h[i].size *
9295 (1 + wreg_addrs_e1h[i].read_regs_count);
9296 }
9297 regdump_len *= 4;
9298 regdump_len += sizeof(struct dump_hdr);
9299
9300 return regdump_len;
9301}
9302
9303static void bnx2x_get_regs(struct net_device *dev,
9304 struct ethtool_regs *regs, void *_p)
9305{
9306 u32 *p = _p, i, j;
9307 struct bnx2x *bp = netdev_priv(dev);
9308 struct dump_hdr dump_hdr = {0};
9309
9310 regs->version = 0;
9311 memset(p, 0, regs->len);
9312
9313 if (!netif_running(bp->dev))
9314 return;
9315
9316 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9317 dump_hdr.dump_sign = dump_sign_all;
9318 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9319 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9320 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9321 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9322 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9323
9324 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9325 p += dump_hdr.hdr_size + 1;
9326
9327 if (CHIP_IS_E1(bp)) {
9328 for (i = 0; i < REGS_COUNT; i++)
9329 if (IS_E1_ONLINE(reg_addrs[i].info))
9330 for (j = 0; j < reg_addrs[i].size; j++)
9331 *p++ = REG_RD(bp,
9332 reg_addrs[i].addr + j*4);
9333
9334 } else { /* E1H */
9335 for (i = 0; i < REGS_COUNT; i++)
9336 if (IS_E1H_ONLINE(reg_addrs[i].info))
9337 for (j = 0; j < reg_addrs[i].size; j++)
9338 *p++ = REG_RD(bp,
9339 reg_addrs[i].addr + j*4);
9340 }
9341}
9342
0d28e49a
EG
9343#define PHY_FW_VER_LEN 10
9344
9345static void bnx2x_get_drvinfo(struct net_device *dev,
9346 struct ethtool_drvinfo *info)
9347{
9348 struct bnx2x *bp = netdev_priv(dev);
9349 u8 phy_fw_ver[PHY_FW_VER_LEN];
9350
9351 strcpy(info->driver, DRV_MODULE_NAME);
9352 strcpy(info->version, DRV_MODULE_VERSION);
9353
9354 phy_fw_ver[0] = '\0';
9355 if (bp->port.pmf) {
9356 bnx2x_acquire_phy_lock(bp);
9357 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9358 (bp->state != BNX2X_STATE_CLOSED),
9359 phy_fw_ver, PHY_FW_VER_LEN);
9360 bnx2x_release_phy_lock(bp);
9361 }
9362
9363 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9364 (bp->common.bc_ver & 0xff0000) >> 16,
9365 (bp->common.bc_ver & 0xff00) >> 8,
9366 (bp->common.bc_ver & 0xff),
9367 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9368 strcpy(info->bus_info, pci_name(bp->pdev));
9369 info->n_stats = BNX2X_NUM_STATS;
9370 info->testinfo_len = BNX2X_NUM_TESTS;
9371 info->eedump_len = bp->common.flash_size;
9372 info->regdump_len = bnx2x_get_regs_len(dev);
9373}
9374
a2fbb9ea
ET
9375static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9376{
9377 struct bnx2x *bp = netdev_priv(dev);
9378
9379 if (bp->flags & NO_WOL_FLAG) {
9380 wol->supported = 0;
9381 wol->wolopts = 0;
9382 } else {
9383 wol->supported = WAKE_MAGIC;
9384 if (bp->wol)
9385 wol->wolopts = WAKE_MAGIC;
9386 else
9387 wol->wolopts = 0;
9388 }
9389 memset(&wol->sopass, 0, sizeof(wol->sopass));
9390}
9391
9392static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9393{
9394 struct bnx2x *bp = netdev_priv(dev);
9395
9396 if (wol->wolopts & ~WAKE_MAGIC)
9397 return -EINVAL;
9398
9399 if (wol->wolopts & WAKE_MAGIC) {
9400 if (bp->flags & NO_WOL_FLAG)
9401 return -EINVAL;
9402
9403 bp->wol = 1;
34f80b04 9404 } else
a2fbb9ea 9405 bp->wol = 0;
34f80b04 9406
a2fbb9ea
ET
9407 return 0;
9408}
9409
9410static u32 bnx2x_get_msglevel(struct net_device *dev)
9411{
9412 struct bnx2x *bp = netdev_priv(dev);
9413
9414 return bp->msglevel;
9415}
9416
9417static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9418{
9419 struct bnx2x *bp = netdev_priv(dev);
9420
9421 if (capable(CAP_NET_ADMIN))
9422 bp->msglevel = level;
9423}
9424
9425static int bnx2x_nway_reset(struct net_device *dev)
9426{
9427 struct bnx2x *bp = netdev_priv(dev);
9428
34f80b04
EG
9429 if (!bp->port.pmf)
9430 return 0;
a2fbb9ea 9431
34f80b04 9432 if (netif_running(dev)) {
bb2a0f7a 9433 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9434 bnx2x_link_set(bp);
9435 }
a2fbb9ea
ET
9436
9437 return 0;
9438}
9439
ab6ad5a4 9440static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9441{
9442 struct bnx2x *bp = netdev_priv(dev);
9443
f34d28ea
EG
9444 if (bp->flags & MF_FUNC_DIS)
9445 return 0;
9446
01e53298
NO
9447 return bp->link_vars.link_up;
9448}
9449
a2fbb9ea
ET
9450static int bnx2x_get_eeprom_len(struct net_device *dev)
9451{
9452 struct bnx2x *bp = netdev_priv(dev);
9453
34f80b04 9454 return bp->common.flash_size;
a2fbb9ea
ET
9455}
9456
9457static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9458{
34f80b04 9459 int port = BP_PORT(bp);
a2fbb9ea
ET
9460 int count, i;
9461 u32 val = 0;
9462
9463 /* adjust timeout for emulation/FPGA */
9464 count = NVRAM_TIMEOUT_COUNT;
9465 if (CHIP_REV_IS_SLOW(bp))
9466 count *= 100;
9467
9468 /* request access to nvram interface */
9469 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9470 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9471
9472 for (i = 0; i < count*10; i++) {
9473 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9474 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9475 break;
9476
9477 udelay(5);
9478 }
9479
9480 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9481 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9482 return -EBUSY;
9483 }
9484
9485 return 0;
9486}
9487
9488static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9489{
34f80b04 9490 int port = BP_PORT(bp);
a2fbb9ea
ET
9491 int count, i;
9492 u32 val = 0;
9493
9494 /* adjust timeout for emulation/FPGA */
9495 count = NVRAM_TIMEOUT_COUNT;
9496 if (CHIP_REV_IS_SLOW(bp))
9497 count *= 100;
9498
9499 /* relinquish nvram interface */
9500 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9501 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9502
9503 for (i = 0; i < count*10; i++) {
9504 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9505 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9506 break;
9507
9508 udelay(5);
9509 }
9510
9511 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9512 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9513 return -EBUSY;
9514 }
9515
9516 return 0;
9517}
9518
9519static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9520{
9521 u32 val;
9522
9523 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9524
9525 /* enable both bits, even on read */
9526 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9527 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9528 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9529}
9530
9531static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9532{
9533 u32 val;
9534
9535 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9536
9537 /* disable both bits, even after read */
9538 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9539 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9540 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9541}
9542
4781bfad 9543static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9544 u32 cmd_flags)
9545{
f1410647 9546 int count, i, rc;
a2fbb9ea
ET
9547 u32 val;
9548
9549 /* build the command word */
9550 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9551
9552 /* need to clear DONE bit separately */
9553 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9554
9555 /* address of the NVRAM to read from */
9556 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9557 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9558
9559 /* issue a read command */
9560 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9561
9562 /* adjust timeout for emulation/FPGA */
9563 count = NVRAM_TIMEOUT_COUNT;
9564 if (CHIP_REV_IS_SLOW(bp))
9565 count *= 100;
9566
9567 /* wait for completion */
9568 *ret_val = 0;
9569 rc = -EBUSY;
9570 for (i = 0; i < count; i++) {
9571 udelay(5);
9572 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9573
9574 if (val & MCPR_NVM_COMMAND_DONE) {
9575 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9576 /* we read nvram data in cpu order
9577 * but ethtool sees it as an array of bytes
9578 * converting to big-endian will do the work */
4781bfad 9579 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9580 rc = 0;
9581 break;
9582 }
9583 }
9584
9585 return rc;
9586}
9587
9588static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9589 int buf_size)
9590{
9591 int rc;
9592 u32 cmd_flags;
4781bfad 9593 __be32 val;
a2fbb9ea
ET
9594
9595 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9596 DP(BNX2X_MSG_NVM,
c14423fe 9597 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9598 offset, buf_size);
9599 return -EINVAL;
9600 }
9601
34f80b04
EG
9602 if (offset + buf_size > bp->common.flash_size) {
9603 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9604 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9605 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9606 return -EINVAL;
9607 }
9608
9609 /* request access to nvram interface */
9610 rc = bnx2x_acquire_nvram_lock(bp);
9611 if (rc)
9612 return rc;
9613
9614 /* enable access to nvram interface */
9615 bnx2x_enable_nvram_access(bp);
9616
9617 /* read the first word(s) */
9618 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9619 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9620 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9621 memcpy(ret_buf, &val, 4);
9622
9623 /* advance to the next dword */
9624 offset += sizeof(u32);
9625 ret_buf += sizeof(u32);
9626 buf_size -= sizeof(u32);
9627 cmd_flags = 0;
9628 }
9629
9630 if (rc == 0) {
9631 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9632 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9633 memcpy(ret_buf, &val, 4);
9634 }
9635
9636 /* disable access to nvram interface */
9637 bnx2x_disable_nvram_access(bp);
9638 bnx2x_release_nvram_lock(bp);
9639
9640 return rc;
9641}
9642
9643static int bnx2x_get_eeprom(struct net_device *dev,
9644 struct ethtool_eeprom *eeprom, u8 *eebuf)
9645{
9646 struct bnx2x *bp = netdev_priv(dev);
9647 int rc;
9648
2add3acb
EG
9649 if (!netif_running(dev))
9650 return -EAGAIN;
9651
34f80b04 9652 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9653 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9654 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9655 eeprom->len, eeprom->len);
9656
9657 /* parameters already validated in ethtool_get_eeprom */
9658
9659 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9660
9661 return rc;
9662}
9663
9664static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9665 u32 cmd_flags)
9666{
f1410647 9667 int count, i, rc;
a2fbb9ea
ET
9668
9669 /* build the command word */
9670 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9671
9672 /* need to clear DONE bit separately */
9673 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9674
9675 /* write the data */
9676 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9677
9678 /* address of the NVRAM to write to */
9679 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9680 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9681
9682 /* issue the write command */
9683 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9684
9685 /* adjust timeout for emulation/FPGA */
9686 count = NVRAM_TIMEOUT_COUNT;
9687 if (CHIP_REV_IS_SLOW(bp))
9688 count *= 100;
9689
9690 /* wait for completion */
9691 rc = -EBUSY;
9692 for (i = 0; i < count; i++) {
9693 udelay(5);
9694 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9695 if (val & MCPR_NVM_COMMAND_DONE) {
9696 rc = 0;
9697 break;
9698 }
9699 }
9700
9701 return rc;
9702}
9703
f1410647 9704#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9705
9706static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9707 int buf_size)
9708{
9709 int rc;
9710 u32 cmd_flags;
9711 u32 align_offset;
4781bfad 9712 __be32 val;
a2fbb9ea 9713
34f80b04
EG
9714 if (offset + buf_size > bp->common.flash_size) {
9715 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9716 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9717 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9718 return -EINVAL;
9719 }
9720
9721 /* request access to nvram interface */
9722 rc = bnx2x_acquire_nvram_lock(bp);
9723 if (rc)
9724 return rc;
9725
9726 /* enable access to nvram interface */
9727 bnx2x_enable_nvram_access(bp);
9728
9729 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9730 align_offset = (offset & ~0x03);
9731 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9732
9733 if (rc == 0) {
9734 val &= ~(0xff << BYTE_OFFSET(offset));
9735 val |= (*data_buf << BYTE_OFFSET(offset));
9736
9737 /* nvram data is returned as an array of bytes
9738 * convert it back to cpu order */
9739 val = be32_to_cpu(val);
9740
a2fbb9ea
ET
9741 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9742 cmd_flags);
9743 }
9744
9745 /* disable access to nvram interface */
9746 bnx2x_disable_nvram_access(bp);
9747 bnx2x_release_nvram_lock(bp);
9748
9749 return rc;
9750}
9751
9752static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9753 int buf_size)
9754{
9755 int rc;
9756 u32 cmd_flags;
9757 u32 val;
9758 u32 written_so_far;
9759
34f80b04 9760 if (buf_size == 1) /* ethtool */
a2fbb9ea 9761 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9762
9763 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9764 DP(BNX2X_MSG_NVM,
c14423fe 9765 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9766 offset, buf_size);
9767 return -EINVAL;
9768 }
9769
34f80b04
EG
9770 if (offset + buf_size > bp->common.flash_size) {
9771 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9772 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9773 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9774 return -EINVAL;
9775 }
9776
9777 /* request access to nvram interface */
9778 rc = bnx2x_acquire_nvram_lock(bp);
9779 if (rc)
9780 return rc;
9781
9782 /* enable access to nvram interface */
9783 bnx2x_enable_nvram_access(bp);
9784
9785 written_so_far = 0;
9786 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9787 while ((written_so_far < buf_size) && (rc == 0)) {
9788 if (written_so_far == (buf_size - sizeof(u32)))
9789 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9790 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9791 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9792 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9793 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9794
9795 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9796
9797 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9798
9799 /* advance to the next dword */
9800 offset += sizeof(u32);
9801 data_buf += sizeof(u32);
9802 written_so_far += sizeof(u32);
9803 cmd_flags = 0;
9804 }
9805
9806 /* disable access to nvram interface */
9807 bnx2x_disable_nvram_access(bp);
9808 bnx2x_release_nvram_lock(bp);
9809
9810 return rc;
9811}
9812
9813static int bnx2x_set_eeprom(struct net_device *dev,
9814 struct ethtool_eeprom *eeprom, u8 *eebuf)
9815{
9816 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9817 int port = BP_PORT(bp);
9818 int rc = 0;
a2fbb9ea 9819
9f4c9583
EG
9820 if (!netif_running(dev))
9821 return -EAGAIN;
9822
34f80b04 9823 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9824 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9825 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9826 eeprom->len, eeprom->len);
9827
9828 /* parameters already validated in ethtool_set_eeprom */
9829
f57a6025
EG
9830 /* PHY eeprom can be accessed only by the PMF */
9831 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9832 !bp->port.pmf)
9833 return -EINVAL;
9834
9835 if (eeprom->magic == 0x50485950) {
9836 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9837 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9838
f57a6025
EG
9839 bnx2x_acquire_phy_lock(bp);
9840 rc |= bnx2x_link_reset(&bp->link_params,
9841 &bp->link_vars, 0);
9842 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9843 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9844 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9845 MISC_REGISTERS_GPIO_HIGH, port);
9846 bnx2x_release_phy_lock(bp);
9847 bnx2x_link_report(bp);
9848
9849 } else if (eeprom->magic == 0x50485952) {
9850 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 9851 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 9852 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9853 rc |= bnx2x_link_reset(&bp->link_params,
9854 &bp->link_vars, 1);
9855
9856 rc |= bnx2x_phy_init(&bp->link_params,
9857 &bp->link_vars);
4a37fb66 9858 bnx2x_release_phy_lock(bp);
f57a6025
EG
9859 bnx2x_calc_fc_adv(bp);
9860 }
9861 } else if (eeprom->magic == 0x53985943) {
9862 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9863 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9864 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9865 u8 ext_phy_addr =
659bc5c4 9866 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
9867
9868 /* DSP Remove Download Mode */
9869 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9870 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9871
f57a6025
EG
9872 bnx2x_acquire_phy_lock(bp);
9873
9874 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9875
9876 /* wait 0.5 sec to allow it to run */
9877 msleep(500);
9878 bnx2x_ext_phy_hw_reset(bp, port);
9879 msleep(500);
9880 bnx2x_release_phy_lock(bp);
9881 }
9882 } else
c18487ee 9883 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9884
9885 return rc;
9886}
9887
9888static int bnx2x_get_coalesce(struct net_device *dev,
9889 struct ethtool_coalesce *coal)
9890{
9891 struct bnx2x *bp = netdev_priv(dev);
9892
9893 memset(coal, 0, sizeof(struct ethtool_coalesce));
9894
9895 coal->rx_coalesce_usecs = bp->rx_ticks;
9896 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9897
9898 return 0;
9899}
9900
ca00392c 9901#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9902static int bnx2x_set_coalesce(struct net_device *dev,
9903 struct ethtool_coalesce *coal)
9904{
9905 struct bnx2x *bp = netdev_priv(dev);
9906
9907 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9908 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9909 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9910
9911 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9912 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9913 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9914
34f80b04 9915 if (netif_running(dev))
a2fbb9ea
ET
9916 bnx2x_update_coalesce(bp);
9917
9918 return 0;
9919}
9920
9921static void bnx2x_get_ringparam(struct net_device *dev,
9922 struct ethtool_ringparam *ering)
9923{
9924 struct bnx2x *bp = netdev_priv(dev);
9925
9926 ering->rx_max_pending = MAX_RX_AVAIL;
9927 ering->rx_mini_max_pending = 0;
9928 ering->rx_jumbo_max_pending = 0;
9929
9930 ering->rx_pending = bp->rx_ring_size;
9931 ering->rx_mini_pending = 0;
9932 ering->rx_jumbo_pending = 0;
9933
9934 ering->tx_max_pending = MAX_TX_AVAIL;
9935 ering->tx_pending = bp->tx_ring_size;
9936}
9937
9938static int bnx2x_set_ringparam(struct net_device *dev,
9939 struct ethtool_ringparam *ering)
9940{
9941 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9942 int rc = 0;
a2fbb9ea
ET
9943
9944 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9945 (ering->tx_pending > MAX_TX_AVAIL) ||
9946 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9947 return -EINVAL;
9948
9949 bp->rx_ring_size = ering->rx_pending;
9950 bp->tx_ring_size = ering->tx_pending;
9951
34f80b04
EG
9952 if (netif_running(dev)) {
9953 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9954 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9955 }
9956
34f80b04 9957 return rc;
a2fbb9ea
ET
9958}
9959
9960static void bnx2x_get_pauseparam(struct net_device *dev,
9961 struct ethtool_pauseparam *epause)
9962{
9963 struct bnx2x *bp = netdev_priv(dev);
9964
356e2385
EG
9965 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9966 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9967 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9968
c0700f90
DM
9969 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9970 BNX2X_FLOW_CTRL_RX);
9971 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9972 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9973
9974 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9975 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9976 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9977}
9978
9979static int bnx2x_set_pauseparam(struct net_device *dev,
9980 struct ethtool_pauseparam *epause)
9981{
9982 struct bnx2x *bp = netdev_priv(dev);
9983
34f80b04
EG
9984 if (IS_E1HMF(bp))
9985 return 0;
9986
a2fbb9ea
ET
9987 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9988 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9989 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9990
c0700f90 9991 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9992
f1410647 9993 if (epause->rx_pause)
c0700f90 9994 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9995
f1410647 9996 if (epause->tx_pause)
c0700f90 9997 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9998
c0700f90
DM
9999 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10000 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 10001
c18487ee 10002 if (epause->autoneg) {
34f80b04 10003 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 10004 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
10005 return -EINVAL;
10006 }
a2fbb9ea 10007
c18487ee 10008 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 10009 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10010 }
a2fbb9ea 10011
c18487ee
YR
10012 DP(NETIF_MSG_LINK,
10013 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
10014
10015 if (netif_running(dev)) {
bb2a0f7a 10016 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
10017 bnx2x_link_set(bp);
10018 }
a2fbb9ea
ET
10019
10020 return 0;
10021}
10022
df0f2343
VZ
10023static int bnx2x_set_flags(struct net_device *dev, u32 data)
10024{
10025 struct bnx2x *bp = netdev_priv(dev);
10026 int changed = 0;
10027 int rc = 0;
10028
10029 /* TPA requires Rx CSUM offloading */
10030 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10031 if (!(dev->features & NETIF_F_LRO)) {
10032 dev->features |= NETIF_F_LRO;
10033 bp->flags |= TPA_ENABLE_FLAG;
10034 changed = 1;
10035 }
10036
10037 } else if (dev->features & NETIF_F_LRO) {
10038 dev->features &= ~NETIF_F_LRO;
10039 bp->flags &= ~TPA_ENABLE_FLAG;
10040 changed = 1;
10041 }
10042
10043 if (changed && netif_running(dev)) {
10044 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10045 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10046 }
10047
10048 return rc;
10049}
10050
a2fbb9ea
ET
10051static u32 bnx2x_get_rx_csum(struct net_device *dev)
10052{
10053 struct bnx2x *bp = netdev_priv(dev);
10054
10055 return bp->rx_csum;
10056}
10057
10058static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10059{
10060 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10061 int rc = 0;
a2fbb9ea
ET
10062
10063 bp->rx_csum = data;
df0f2343
VZ
10064
10065 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
10066 TPA'ed packets will be discarded due to wrong TCP CSUM */
10067 if (!data) {
10068 u32 flags = ethtool_op_get_flags(dev);
10069
10070 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10071 }
10072
10073 return rc;
a2fbb9ea
ET
10074}
10075
10076static int bnx2x_set_tso(struct net_device *dev, u32 data)
10077{
755735eb 10078 if (data) {
a2fbb9ea 10079 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10080 dev->features |= NETIF_F_TSO6;
10081 } else {
a2fbb9ea 10082 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10083 dev->features &= ~NETIF_F_TSO6;
10084 }
10085
a2fbb9ea
ET
10086 return 0;
10087}
10088
f3c87cdd 10089static const struct {
a2fbb9ea
ET
10090 char string[ETH_GSTRING_LEN];
10091} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
10092 { "register_test (offline)" },
10093 { "memory_test (offline)" },
10094 { "loopback_test (offline)" },
10095 { "nvram_test (online)" },
10096 { "interrupt_test (online)" },
10097 { "link_test (online)" },
d3d4f495 10098 { "idle check (online)" }
a2fbb9ea
ET
10099};
10100
f3c87cdd
YG
10101static int bnx2x_test_registers(struct bnx2x *bp)
10102{
10103 int idx, i, rc = -ENODEV;
10104 u32 wr_val = 0;
9dabc424 10105 int port = BP_PORT(bp);
f3c87cdd
YG
10106 static const struct {
10107 u32 offset0;
10108 u32 offset1;
10109 u32 mask;
10110 } reg_tbl[] = {
10111/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10112 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10113 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10114 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10115 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10116 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10117 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10118 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10119 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10120 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10121/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10122 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10123 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10124 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10125 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10126 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10127 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10128 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10129 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
10130 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10131/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
10132 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10133 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10134 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10135 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10136 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10137 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10138 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10139 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
10140 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10141/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
10142 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10143 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10144 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10145 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10146 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10147 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10148
10149 { 0xffffffff, 0, 0x00000000 }
10150 };
10151
10152 if (!netif_running(bp->dev))
10153 return rc;
10154
10155 /* Repeat the test twice:
10156 First by writing 0x00000000, second by writing 0xffffffff */
10157 for (idx = 0; idx < 2; idx++) {
10158
10159 switch (idx) {
10160 case 0:
10161 wr_val = 0;
10162 break;
10163 case 1:
10164 wr_val = 0xffffffff;
10165 break;
10166 }
10167
10168 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10169 u32 offset, mask, save_val, val;
f3c87cdd
YG
10170
10171 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10172 mask = reg_tbl[i].mask;
10173
10174 save_val = REG_RD(bp, offset);
10175
10176 REG_WR(bp, offset, wr_val);
10177 val = REG_RD(bp, offset);
10178
10179 /* Restore the original register's value */
10180 REG_WR(bp, offset, save_val);
10181
10182 /* verify that value is as expected value */
10183 if ((val & mask) != (wr_val & mask))
10184 goto test_reg_exit;
10185 }
10186 }
10187
10188 rc = 0;
10189
10190test_reg_exit:
10191 return rc;
10192}
10193
10194static int bnx2x_test_memory(struct bnx2x *bp)
10195{
10196 int i, j, rc = -ENODEV;
10197 u32 val;
10198 static const struct {
10199 u32 offset;
10200 int size;
10201 } mem_tbl[] = {
10202 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10203 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10204 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10205 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10206 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10207 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10208 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10209
10210 { 0xffffffff, 0 }
10211 };
10212 static const struct {
10213 char *name;
10214 u32 offset;
9dabc424
YG
10215 u32 e1_mask;
10216 u32 e1h_mask;
f3c87cdd 10217 } prty_tbl[] = {
9dabc424
YG
10218 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10219 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10220 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10221 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10222 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10223 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10224
10225 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10226 };
10227
10228 if (!netif_running(bp->dev))
10229 return rc;
10230
10231 /* Go through all the memories */
10232 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10233 for (j = 0; j < mem_tbl[i].size; j++)
10234 REG_RD(bp, mem_tbl[i].offset + j*4);
10235
10236 /* Check the parity status */
10237 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10238 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10239 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10240 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10241 DP(NETIF_MSG_HW,
10242 "%s is 0x%x\n", prty_tbl[i].name, val);
10243 goto test_mem_exit;
10244 }
10245 }
10246
10247 rc = 0;
10248
10249test_mem_exit:
10250 return rc;
10251}
10252
f3c87cdd
YG
10253static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10254{
10255 int cnt = 1000;
10256
10257 if (link_up)
10258 while (bnx2x_link_test(bp) && cnt--)
10259 msleep(10);
10260}
10261
10262static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10263{
10264 unsigned int pkt_size, num_pkts, i;
10265 struct sk_buff *skb;
10266 unsigned char *packet;
ca00392c
EG
10267 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10268 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
10269 u16 tx_start_idx, tx_idx;
10270 u16 rx_start_idx, rx_idx;
ca00392c 10271 u16 pkt_prod, bd_prod;
f3c87cdd 10272 struct sw_tx_bd *tx_buf;
ca00392c
EG
10273 struct eth_tx_start_bd *tx_start_bd;
10274 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10275 dma_addr_t mapping;
10276 union eth_rx_cqe *cqe;
10277 u8 cqe_fp_flags;
10278 struct sw_rx_bd *rx_buf;
10279 u16 len;
10280 int rc = -ENODEV;
10281
b5bf9068
EG
10282 /* check the loopback mode */
10283 switch (loopback_mode) {
10284 case BNX2X_PHY_LOOPBACK:
10285 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10286 return -EINVAL;
10287 break;
10288 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10289 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10290 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10291 break;
10292 default:
f3c87cdd 10293 return -EINVAL;
b5bf9068 10294 }
f3c87cdd 10295
b5bf9068
EG
10296 /* prepare the loopback packet */
10297 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10298 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10299 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10300 if (!skb) {
10301 rc = -ENOMEM;
10302 goto test_loopback_exit;
10303 }
10304 packet = skb_put(skb, pkt_size);
10305 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10306 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10307 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10308 for (i = ETH_HLEN; i < pkt_size; i++)
10309 packet[i] = (unsigned char) (i & 0xff);
10310
b5bf9068 10311 /* send the loopback packet */
f3c87cdd 10312 num_pkts = 0;
ca00392c
EG
10313 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10314 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10315
ca00392c
EG
10316 pkt_prod = fp_tx->tx_pkt_prod++;
10317 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10318 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10319 tx_buf->skb = skb;
ca00392c 10320 tx_buf->flags = 0;
f3c87cdd 10321
ca00392c
EG
10322 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10323 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10324 mapping = pci_map_single(bp->pdev, skb->data,
10325 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10326 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10327 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10328 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10329 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10330 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10331 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10332 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10333 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10334
10335 /* turn on parsing and get a BD */
10336 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10337 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10338
10339 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10340
58f4c4cf
EG
10341 wmb();
10342
ca00392c
EG
10343 fp_tx->tx_db.data.prod += 2;
10344 barrier();
10345 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10346
10347 mmiowb();
10348
10349 num_pkts++;
ca00392c 10350 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10351 bp->dev->trans_start = jiffies;
10352
10353 udelay(100);
10354
ca00392c 10355 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10356 if (tx_idx != tx_start_idx + num_pkts)
10357 goto test_loopback_exit;
10358
ca00392c 10359 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10360 if (rx_idx != rx_start_idx + num_pkts)
10361 goto test_loopback_exit;
10362
ca00392c 10363 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10364 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10365 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10366 goto test_loopback_rx_exit;
10367
10368 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10369 if (len != pkt_size)
10370 goto test_loopback_rx_exit;
10371
ca00392c 10372 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10373 skb = rx_buf->skb;
10374 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10375 for (i = ETH_HLEN; i < pkt_size; i++)
10376 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10377 goto test_loopback_rx_exit;
10378
10379 rc = 0;
10380
10381test_loopback_rx_exit:
f3c87cdd 10382
ca00392c
EG
10383 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10384 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10385 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10386 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10387
10388 /* Update producers */
ca00392c
EG
10389 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10390 fp_rx->rx_sge_prod);
f3c87cdd
YG
10391
10392test_loopback_exit:
10393 bp->link_params.loopback_mode = LOOPBACK_NONE;
10394
10395 return rc;
10396}
10397
10398static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10399{
b5bf9068 10400 int rc = 0, res;
f3c87cdd
YG
10401
10402 if (!netif_running(bp->dev))
10403 return BNX2X_LOOPBACK_FAILED;
10404
f8ef6e44 10405 bnx2x_netif_stop(bp, 1);
3910c8ae 10406 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10407
b5bf9068
EG
10408 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10409 if (res) {
10410 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10411 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10412 }
10413
b5bf9068
EG
10414 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10415 if (res) {
10416 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10417 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10418 }
10419
3910c8ae 10420 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10421 bnx2x_netif_start(bp);
10422
10423 return rc;
10424}
10425
10426#define CRC32_RESIDUAL 0xdebb20e3
10427
10428static int bnx2x_test_nvram(struct bnx2x *bp)
10429{
10430 static const struct {
10431 int offset;
10432 int size;
10433 } nvram_tbl[] = {
10434 { 0, 0x14 }, /* bootstrap */
10435 { 0x14, 0xec }, /* dir */
10436 { 0x100, 0x350 }, /* manuf_info */
10437 { 0x450, 0xf0 }, /* feature_info */
10438 { 0x640, 0x64 }, /* upgrade_key_info */
10439 { 0x6a4, 0x64 },
10440 { 0x708, 0x70 }, /* manuf_key_info */
10441 { 0x778, 0x70 },
10442 { 0, 0 }
10443 };
4781bfad 10444 __be32 buf[0x350 / 4];
f3c87cdd
YG
10445 u8 *data = (u8 *)buf;
10446 int i, rc;
ab6ad5a4 10447 u32 magic, crc;
f3c87cdd
YG
10448
10449 rc = bnx2x_nvram_read(bp, 0, data, 4);
10450 if (rc) {
f5372251 10451 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10452 goto test_nvram_exit;
10453 }
10454
10455 magic = be32_to_cpu(buf[0]);
10456 if (magic != 0x669955aa) {
10457 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10458 rc = -ENODEV;
10459 goto test_nvram_exit;
10460 }
10461
10462 for (i = 0; nvram_tbl[i].size; i++) {
10463
10464 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10465 nvram_tbl[i].size);
10466 if (rc) {
10467 DP(NETIF_MSG_PROBE,
f5372251 10468 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10469 goto test_nvram_exit;
10470 }
10471
ab6ad5a4
EG
10472 crc = ether_crc_le(nvram_tbl[i].size, data);
10473 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10474 DP(NETIF_MSG_PROBE,
ab6ad5a4 10475 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10476 rc = -ENODEV;
10477 goto test_nvram_exit;
10478 }
10479 }
10480
10481test_nvram_exit:
10482 return rc;
10483}
10484
10485static int bnx2x_test_intr(struct bnx2x *bp)
10486{
10487 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10488 int i, rc;
10489
10490 if (!netif_running(bp->dev))
10491 return -ENODEV;
10492
8d9c5f34 10493 config->hdr.length = 0;
af246401
EG
10494 if (CHIP_IS_E1(bp))
10495 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10496 else
10497 config->hdr.offset = BP_FUNC(bp);
0626b899 10498 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10499 config->hdr.reserved1 = 0;
10500
e665bfda
MC
10501 bp->set_mac_pending++;
10502 smp_wmb();
f3c87cdd
YG
10503 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10504 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10505 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10506 if (rc == 0) {
f3c87cdd
YG
10507 for (i = 0; i < 10; i++) {
10508 if (!bp->set_mac_pending)
10509 break;
e665bfda 10510 smp_rmb();
f3c87cdd
YG
10511 msleep_interruptible(10);
10512 }
10513 if (i == 10)
10514 rc = -ENODEV;
10515 }
10516
10517 return rc;
10518}
10519
a2fbb9ea
ET
10520static void bnx2x_self_test(struct net_device *dev,
10521 struct ethtool_test *etest, u64 *buf)
10522{
10523 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10524
10525 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10526
f3c87cdd 10527 if (!netif_running(dev))
a2fbb9ea 10528 return;
a2fbb9ea 10529
33471629 10530 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10531 if (IS_E1HMF(bp))
10532 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10533
10534 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10535 int port = BP_PORT(bp);
10536 u32 val;
f3c87cdd
YG
10537 u8 link_up;
10538
279abdf5
EG
10539 /* save current value of input enable for TX port IF */
10540 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10541 /* disable input for TX port IF */
10542 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10543
061bc702 10544 link_up = (bnx2x_link_test(bp) == 0);
f3c87cdd
YG
10545 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10546 bnx2x_nic_load(bp, LOAD_DIAG);
10547 /* wait until link state is restored */
10548 bnx2x_wait_for_link(bp, link_up);
10549
10550 if (bnx2x_test_registers(bp) != 0) {
10551 buf[0] = 1;
10552 etest->flags |= ETH_TEST_FL_FAILED;
10553 }
10554 if (bnx2x_test_memory(bp) != 0) {
10555 buf[1] = 1;
10556 etest->flags |= ETH_TEST_FL_FAILED;
10557 }
10558 buf[2] = bnx2x_test_loopback(bp, link_up);
10559 if (buf[2] != 0)
10560 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10561
f3c87cdd 10562 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10563
10564 /* restore input for TX port IF */
10565 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10566
f3c87cdd
YG
10567 bnx2x_nic_load(bp, LOAD_NORMAL);
10568 /* wait until link state is restored */
10569 bnx2x_wait_for_link(bp, link_up);
10570 }
10571 if (bnx2x_test_nvram(bp) != 0) {
10572 buf[3] = 1;
a2fbb9ea
ET
10573 etest->flags |= ETH_TEST_FL_FAILED;
10574 }
f3c87cdd
YG
10575 if (bnx2x_test_intr(bp) != 0) {
10576 buf[4] = 1;
10577 etest->flags |= ETH_TEST_FL_FAILED;
10578 }
10579 if (bp->port.pmf)
10580 if (bnx2x_link_test(bp) != 0) {
10581 buf[5] = 1;
10582 etest->flags |= ETH_TEST_FL_FAILED;
10583 }
f3c87cdd
YG
10584
10585#ifdef BNX2X_EXTRA_DEBUG
10586 bnx2x_panic_dump(bp);
10587#endif
a2fbb9ea
ET
10588}
10589
de832a55
EG
10590static const struct {
10591 long offset;
10592 int size;
10593 u8 string[ETH_GSTRING_LEN];
10594} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10595/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10596 { Q_STATS_OFFSET32(error_bytes_received_hi),
10597 8, "[%d]: rx_error_bytes" },
10598 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10599 8, "[%d]: rx_ucast_packets" },
10600 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10601 8, "[%d]: rx_mcast_packets" },
10602 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10603 8, "[%d]: rx_bcast_packets" },
10604 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10605 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10606 4, "[%d]: rx_phy_ip_err_discards"},
10607 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10608 4, "[%d]: rx_skb_alloc_discard" },
10609 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10610
10611/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10612 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10613 8, "[%d]: tx_packets" }
10614};
10615
bb2a0f7a
YG
10616static const struct {
10617 long offset;
10618 int size;
10619 u32 flags;
66e855f3
YG
10620#define STATS_FLAGS_PORT 1
10621#define STATS_FLAGS_FUNC 2
de832a55 10622#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10623 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10624} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10625/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10626 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10627 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10628 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10629 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10630 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10631 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10632 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10633 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10634 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10635 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10636 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10637 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10638 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10639 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10640 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10641 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10642 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10643/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10644 8, STATS_FLAGS_PORT, "rx_fragments" },
10645 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10646 8, STATS_FLAGS_PORT, "rx_jabbers" },
10647 { STATS_OFFSET32(no_buff_discard_hi),
10648 8, STATS_FLAGS_BOTH, "rx_discards" },
10649 { STATS_OFFSET32(mac_filter_discard),
10650 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10651 { STATS_OFFSET32(xxoverflow_discard),
10652 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10653 { STATS_OFFSET32(brb_drop_hi),
10654 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10655 { STATS_OFFSET32(brb_truncate_hi),
10656 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10657 { STATS_OFFSET32(pause_frames_received_hi),
10658 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10659 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10660 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10661 { STATS_OFFSET32(nig_timer_max),
10662 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10663/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10664 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10665 { STATS_OFFSET32(rx_skb_alloc_failed),
10666 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10667 { STATS_OFFSET32(hw_csum_err),
10668 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10669
10670 { STATS_OFFSET32(total_bytes_transmitted_hi),
10671 8, STATS_FLAGS_BOTH, "tx_bytes" },
10672 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10673 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10674 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10675 8, STATS_FLAGS_BOTH, "tx_packets" },
10676 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10677 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10678 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10679 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10680 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10681 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10682 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10683 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10684/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10685 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10686 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10687 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10688 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10689 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10690 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10691 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10692 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10693 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10694 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10695 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10696 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10697 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10698 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10699 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10700 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10701 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10702 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10703 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10704/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10705 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10706 { STATS_OFFSET32(pause_frames_sent_hi),
10707 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10708};
10709
de832a55
EG
10710#define IS_PORT_STAT(i) \
10711 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10712#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10713#define IS_E1HMF_MODE_STAT(bp) \
10714 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10715
15f0a394
BH
10716static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10717{
10718 struct bnx2x *bp = netdev_priv(dev);
10719 int i, num_stats;
10720
10721 switch(stringset) {
10722 case ETH_SS_STATS:
10723 if (is_multi(bp)) {
10724 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10725 if (!IS_E1HMF_MODE_STAT(bp))
10726 num_stats += BNX2X_NUM_STATS;
10727 } else {
10728 if (IS_E1HMF_MODE_STAT(bp)) {
10729 num_stats = 0;
10730 for (i = 0; i < BNX2X_NUM_STATS; i++)
10731 if (IS_FUNC_STAT(i))
10732 num_stats++;
10733 } else
10734 num_stats = BNX2X_NUM_STATS;
10735 }
10736 return num_stats;
10737
10738 case ETH_SS_TEST:
10739 return BNX2X_NUM_TESTS;
10740
10741 default:
10742 return -EINVAL;
10743 }
10744}
10745
a2fbb9ea
ET
10746static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10747{
bb2a0f7a 10748 struct bnx2x *bp = netdev_priv(dev);
de832a55 10749 int i, j, k;
bb2a0f7a 10750
a2fbb9ea
ET
10751 switch (stringset) {
10752 case ETH_SS_STATS:
de832a55
EG
10753 if (is_multi(bp)) {
10754 k = 0;
ca00392c 10755 for_each_rx_queue(bp, i) {
de832a55
EG
10756 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10757 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10758 bnx2x_q_stats_arr[j].string, i);
10759 k += BNX2X_NUM_Q_STATS;
10760 }
10761 if (IS_E1HMF_MODE_STAT(bp))
10762 break;
10763 for (j = 0; j < BNX2X_NUM_STATS; j++)
10764 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10765 bnx2x_stats_arr[j].string);
10766 } else {
10767 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10768 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10769 continue;
10770 strcpy(buf + j*ETH_GSTRING_LEN,
10771 bnx2x_stats_arr[i].string);
10772 j++;
10773 }
bb2a0f7a 10774 }
a2fbb9ea
ET
10775 break;
10776
10777 case ETH_SS_TEST:
10778 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10779 break;
10780 }
10781}
10782
a2fbb9ea
ET
10783static void bnx2x_get_ethtool_stats(struct net_device *dev,
10784 struct ethtool_stats *stats, u64 *buf)
10785{
10786 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10787 u32 *hw_stats, *offset;
10788 int i, j, k;
bb2a0f7a 10789
de832a55
EG
10790 if (is_multi(bp)) {
10791 k = 0;
ca00392c 10792 for_each_rx_queue(bp, i) {
de832a55
EG
10793 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10794 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10795 if (bnx2x_q_stats_arr[j].size == 0) {
10796 /* skip this counter */
10797 buf[k + j] = 0;
10798 continue;
10799 }
10800 offset = (hw_stats +
10801 bnx2x_q_stats_arr[j].offset);
10802 if (bnx2x_q_stats_arr[j].size == 4) {
10803 /* 4-byte counter */
10804 buf[k + j] = (u64) *offset;
10805 continue;
10806 }
10807 /* 8-byte counter */
10808 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10809 }
10810 k += BNX2X_NUM_Q_STATS;
10811 }
10812 if (IS_E1HMF_MODE_STAT(bp))
10813 return;
10814 hw_stats = (u32 *)&bp->eth_stats;
10815 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10816 if (bnx2x_stats_arr[j].size == 0) {
10817 /* skip this counter */
10818 buf[k + j] = 0;
10819 continue;
10820 }
10821 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10822 if (bnx2x_stats_arr[j].size == 4) {
10823 /* 4-byte counter */
10824 buf[k + j] = (u64) *offset;
10825 continue;
10826 }
10827 /* 8-byte counter */
10828 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10829 }
de832a55
EG
10830 } else {
10831 hw_stats = (u32 *)&bp->eth_stats;
10832 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10833 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10834 continue;
10835 if (bnx2x_stats_arr[i].size == 0) {
10836 /* skip this counter */
10837 buf[j] = 0;
10838 j++;
10839 continue;
10840 }
10841 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10842 if (bnx2x_stats_arr[i].size == 4) {
10843 /* 4-byte counter */
10844 buf[j] = (u64) *offset;
10845 j++;
10846 continue;
10847 }
10848 /* 8-byte counter */
10849 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10850 j++;
a2fbb9ea 10851 }
a2fbb9ea
ET
10852 }
10853}
10854
10855static int bnx2x_phys_id(struct net_device *dev, u32 data)
10856{
10857 struct bnx2x *bp = netdev_priv(dev);
10858 int i;
10859
34f80b04
EG
10860 if (!netif_running(dev))
10861 return 0;
10862
10863 if (!bp->port.pmf)
10864 return 0;
10865
a2fbb9ea
ET
10866 if (data == 0)
10867 data = 2;
10868
10869 for (i = 0; i < (data * 2); i++) {
c18487ee 10870 if ((i % 2) == 0)
7846e471
YR
10871 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10872 SPEED_1000);
c18487ee 10873 else
7846e471 10874 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 10875
a2fbb9ea
ET
10876 msleep_interruptible(500);
10877 if (signal_pending(current))
10878 break;
10879 }
10880
c18487ee 10881 if (bp->link_vars.link_up)
7846e471
YR
10882 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10883 bp->link_vars.line_speed);
a2fbb9ea
ET
10884
10885 return 0;
10886}
10887
0fc0b732 10888static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10889 .get_settings = bnx2x_get_settings,
10890 .set_settings = bnx2x_set_settings,
10891 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10892 .get_regs_len = bnx2x_get_regs_len,
10893 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10894 .get_wol = bnx2x_get_wol,
10895 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10896 .get_msglevel = bnx2x_get_msglevel,
10897 .set_msglevel = bnx2x_set_msglevel,
10898 .nway_reset = bnx2x_nway_reset,
01e53298 10899 .get_link = bnx2x_get_link,
7a9b2557
VZ
10900 .get_eeprom_len = bnx2x_get_eeprom_len,
10901 .get_eeprom = bnx2x_get_eeprom,
10902 .set_eeprom = bnx2x_set_eeprom,
10903 .get_coalesce = bnx2x_get_coalesce,
10904 .set_coalesce = bnx2x_set_coalesce,
10905 .get_ringparam = bnx2x_get_ringparam,
10906 .set_ringparam = bnx2x_set_ringparam,
10907 .get_pauseparam = bnx2x_get_pauseparam,
10908 .set_pauseparam = bnx2x_set_pauseparam,
10909 .get_rx_csum = bnx2x_get_rx_csum,
10910 .set_rx_csum = bnx2x_set_rx_csum,
10911 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10912 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10913 .set_flags = bnx2x_set_flags,
10914 .get_flags = ethtool_op_get_flags,
10915 .get_sg = ethtool_op_get_sg,
10916 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10917 .get_tso = ethtool_op_get_tso,
10918 .set_tso = bnx2x_set_tso,
7a9b2557 10919 .self_test = bnx2x_self_test,
15f0a394 10920 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10921 .get_strings = bnx2x_get_strings,
a2fbb9ea 10922 .phys_id = bnx2x_phys_id,
bb2a0f7a 10923 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10924};
10925
10926/* end of ethtool_ops */
10927
10928/****************************************************************************
10929* General service functions
10930****************************************************************************/
10931
10932static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10933{
10934 u16 pmcsr;
10935
10936 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10937
10938 switch (state) {
10939 case PCI_D0:
34f80b04 10940 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10941 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10942 PCI_PM_CTRL_PME_STATUS));
10943
10944 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10945 /* delay required during transition out of D3hot */
a2fbb9ea 10946 msleep(20);
34f80b04 10947 break;
a2fbb9ea 10948
34f80b04
EG
10949 case PCI_D3hot:
10950 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10951 pmcsr |= 3;
a2fbb9ea 10952
34f80b04
EG
10953 if (bp->wol)
10954 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10955
34f80b04
EG
10956 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10957 pmcsr);
a2fbb9ea 10958
34f80b04
EG
10959 /* No more memory access after this point until
10960 * device is brought back to D0.
10961 */
10962 break;
10963
10964 default:
10965 return -EINVAL;
10966 }
10967 return 0;
a2fbb9ea
ET
10968}
10969
237907c1
EG
10970static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10971{
10972 u16 rx_cons_sb;
10973
10974 /* Tell compiler that status block fields can change */
10975 barrier();
10976 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10977 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10978 rx_cons_sb++;
10979 return (fp->rx_comp_cons != rx_cons_sb);
10980}
10981
34f80b04
EG
10982/*
10983 * net_device service functions
10984 */
10985
a2fbb9ea
ET
10986static int bnx2x_poll(struct napi_struct *napi, int budget)
10987{
10988 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10989 napi);
10990 struct bnx2x *bp = fp->bp;
10991 int work_done = 0;
10992
10993#ifdef BNX2X_STOP_ON_ERROR
10994 if (unlikely(bp->panic))
34f80b04 10995 goto poll_panic;
a2fbb9ea
ET
10996#endif
10997
a2fbb9ea
ET
10998 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10999 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11000
11001 bnx2x_update_fpsb_idx(fp);
11002
8534f32c 11003 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 11004 work_done = bnx2x_rx_int(fp, budget);
356e2385 11005
8534f32c
EG
11006 /* must not complete if we consumed full budget */
11007 if (work_done >= budget)
11008 goto poll_again;
11009 }
a2fbb9ea 11010
ca00392c 11011 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 11012 * ensure that status block indices have been actually read
ca00392c 11013 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 11014 * so that we won't write the "newer" value of the status block to IGU
ca00392c 11015 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c
EG
11016 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11017 * may be postponed to right before bnx2x_ack_sb). In this case
11018 * there will never be another interrupt until there is another update
11019 * of the status block, while there is still unhandled work.
11020 */
11021 rmb();
a2fbb9ea 11022
ca00392c 11023 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 11024#ifdef BNX2X_STOP_ON_ERROR
34f80b04 11025poll_panic:
a2fbb9ea 11026#endif
288379f0 11027 napi_complete(napi);
a2fbb9ea 11028
0626b899 11029 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 11030 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 11031 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
11032 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11033 }
356e2385 11034
8534f32c 11035poll_again:
a2fbb9ea
ET
11036 return work_done;
11037}
11038
755735eb
EG
11039
11040/* we split the first BD into headers and data BDs
33471629 11041 * to ease the pain of our fellow microcode engineers
755735eb
EG
11042 * we use one mapping for both BDs
11043 * So far this has only been observed to happen
11044 * in Other Operating Systems(TM)
11045 */
11046static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11047 struct bnx2x_fastpath *fp,
ca00392c
EG
11048 struct sw_tx_bd *tx_buf,
11049 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
11050 u16 bd_prod, int nbd)
11051{
ca00392c 11052 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
11053 struct eth_tx_bd *d_tx_bd;
11054 dma_addr_t mapping;
11055 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11056
11057 /* first fix first BD */
11058 h_tx_bd->nbd = cpu_to_le16(nbd);
11059 h_tx_bd->nbytes = cpu_to_le16(hlen);
11060
11061 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11062 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11063 h_tx_bd->addr_lo, h_tx_bd->nbd);
11064
11065 /* now get a new data BD
11066 * (after the pbd) and fill it */
11067 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11068 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11069
11070 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11071 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11072
11073 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11074 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11075 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11076
11077 /* this marks the BD as one that has no individual mapping */
11078 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11079
755735eb
EG
11080 DP(NETIF_MSG_TX_QUEUED,
11081 "TSO split data size is %d (%x:%x)\n",
11082 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11083
ca00392c
EG
11084 /* update tx_bd */
11085 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11086
11087 return bd_prod;
11088}
11089
11090static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11091{
11092 if (fix > 0)
11093 csum = (u16) ~csum_fold(csum_sub(csum,
11094 csum_partial(t_header - fix, fix, 0)));
11095
11096 else if (fix < 0)
11097 csum = (u16) ~csum_fold(csum_add(csum,
11098 csum_partial(t_header, -fix, 0)));
11099
11100 return swab16(csum);
11101}
11102
11103static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11104{
11105 u32 rc;
11106
11107 if (skb->ip_summed != CHECKSUM_PARTIAL)
11108 rc = XMIT_PLAIN;
11109
11110 else {
4781bfad 11111 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11112 rc = XMIT_CSUM_V6;
11113 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11114 rc |= XMIT_CSUM_TCP;
11115
11116 } else {
11117 rc = XMIT_CSUM_V4;
11118 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11119 rc |= XMIT_CSUM_TCP;
11120 }
11121 }
11122
11123 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11124 rc |= XMIT_GSO_V4;
11125
11126 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11127 rc |= XMIT_GSO_V6;
11128
11129 return rc;
11130}
11131
632da4d6 11132#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11133/* check if packet requires linearization (packet is too fragmented)
11134 no need to check fragmentation if page size > 8K (there will be no
11135 violation to FW restrictions) */
755735eb
EG
11136static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11137 u32 xmit_type)
11138{
11139 int to_copy = 0;
11140 int hlen = 0;
11141 int first_bd_sz = 0;
11142
11143 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11144 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11145
11146 if (xmit_type & XMIT_GSO) {
11147 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11148 /* Check if LSO packet needs to be copied:
11149 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11150 int wnd_size = MAX_FETCH_BD - 3;
33471629 11151 /* Number of windows to check */
755735eb
EG
11152 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11153 int wnd_idx = 0;
11154 int frag_idx = 0;
11155 u32 wnd_sum = 0;
11156
11157 /* Headers length */
11158 hlen = (int)(skb_transport_header(skb) - skb->data) +
11159 tcp_hdrlen(skb);
11160
11161 /* Amount of data (w/o headers) on linear part of SKB*/
11162 first_bd_sz = skb_headlen(skb) - hlen;
11163
11164 wnd_sum = first_bd_sz;
11165
11166 /* Calculate the first sum - it's special */
11167 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11168 wnd_sum +=
11169 skb_shinfo(skb)->frags[frag_idx].size;
11170
11171 /* If there was data on linear skb data - check it */
11172 if (first_bd_sz > 0) {
11173 if (unlikely(wnd_sum < lso_mss)) {
11174 to_copy = 1;
11175 goto exit_lbl;
11176 }
11177
11178 wnd_sum -= first_bd_sz;
11179 }
11180
11181 /* Others are easier: run through the frag list and
11182 check all windows */
11183 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11184 wnd_sum +=
11185 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11186
11187 if (unlikely(wnd_sum < lso_mss)) {
11188 to_copy = 1;
11189 break;
11190 }
11191 wnd_sum -=
11192 skb_shinfo(skb)->frags[wnd_idx].size;
11193 }
755735eb
EG
11194 } else {
11195 /* in non-LSO too fragmented packet should always
11196 be linearized */
11197 to_copy = 1;
11198 }
11199 }
11200
11201exit_lbl:
11202 if (unlikely(to_copy))
11203 DP(NETIF_MSG_TX_QUEUED,
11204 "Linearization IS REQUIRED for %s packet. "
11205 "num_frags %d hlen %d first_bd_sz %d\n",
11206 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11207 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11208
11209 return to_copy;
11210}
632da4d6 11211#endif
755735eb
EG
11212
11213/* called with netif_tx_lock
a2fbb9ea 11214 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11215 * netif_wake_queue()
a2fbb9ea 11216 */
61357325 11217static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11218{
11219 struct bnx2x *bp = netdev_priv(dev);
ca00392c 11220 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 11221 struct netdev_queue *txq;
a2fbb9ea 11222 struct sw_tx_bd *tx_buf;
ca00392c
EG
11223 struct eth_tx_start_bd *tx_start_bd;
11224 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11225 struct eth_tx_parse_bd *pbd = NULL;
11226 u16 pkt_prod, bd_prod;
755735eb 11227 int nbd, fp_index;
a2fbb9ea 11228 dma_addr_t mapping;
755735eb 11229 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11230 int i;
11231 u8 hlen = 0;
ca00392c 11232 __le16 pkt_size = 0;
a2fbb9ea
ET
11233
11234#ifdef BNX2X_STOP_ON_ERROR
11235 if (unlikely(bp->panic))
11236 return NETDEV_TX_BUSY;
11237#endif
11238
555f6c78
EG
11239 fp_index = skb_get_queue_mapping(skb);
11240 txq = netdev_get_tx_queue(dev, fp_index);
11241
ca00392c
EG
11242 fp = &bp->fp[fp_index + bp->num_rx_queues];
11243 fp_stat = &bp->fp[fp_index];
755735eb 11244
231fd58a 11245 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 11246 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 11247 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11248 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11249 return NETDEV_TX_BUSY;
11250 }
11251
755735eb
EG
11252 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11253 " gso type %x xmit_type %x\n",
11254 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11255 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11256
632da4d6 11257#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11258 /* First, check if we need to linearize the skb (due to FW
11259 restrictions). No need to check fragmentation if page size > 8K
11260 (there will be no violation to FW restrictions) */
755735eb
EG
11261 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11262 /* Statistics of linearization */
11263 bp->lin_cnt++;
11264 if (skb_linearize(skb) != 0) {
11265 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11266 "silently dropping this SKB\n");
11267 dev_kfree_skb_any(skb);
da5a662a 11268 return NETDEV_TX_OK;
755735eb
EG
11269 }
11270 }
632da4d6 11271#endif
755735eb 11272
a2fbb9ea 11273 /*
755735eb 11274 Please read carefully. First we use one BD which we mark as start,
ca00392c 11275 then we have a parsing info BD (used for TSO or xsum),
755735eb 11276 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11277 (don't forget to mark the last one as last,
11278 and to unmap only AFTER you write to the BD ...)
755735eb 11279 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11280 */
11281
11282 pkt_prod = fp->tx_pkt_prod++;
755735eb 11283 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11284
755735eb 11285 /* get a tx_buf and first BD */
a2fbb9ea 11286 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11287 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11288
ca00392c
EG
11289 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11290 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11291 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11292 /* header nbd */
ca00392c 11293 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11294
755735eb
EG
11295 /* remember the first BD of the packet */
11296 tx_buf->first_bd = fp->tx_bd_prod;
11297 tx_buf->skb = skb;
ca00392c 11298 tx_buf->flags = 0;
a2fbb9ea
ET
11299
11300 DP(NETIF_MSG_TX_QUEUED,
11301 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11302 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11303
0c6671b0
EG
11304#ifdef BCM_VLAN
11305 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11306 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11307 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11308 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11309 } else
0c6671b0 11310#endif
ca00392c 11311 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11312
ca00392c
EG
11313 /* turn on parsing and get a BD */
11314 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11315 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11316
ca00392c 11317 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11318
11319 if (xmit_type & XMIT_CSUM) {
ca00392c 11320 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11321
11322 /* for now NS flag is not used in Linux */
4781bfad
EG
11323 pbd->global_data =
11324 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11325 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11326
755735eb
EG
11327 pbd->ip_hlen = (skb_transport_header(skb) -
11328 skb_network_header(skb)) / 2;
11329
11330 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11331
755735eb 11332 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11333 hlen = hlen*2;
a2fbb9ea 11334
ca00392c 11335 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11336
11337 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11338 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11339 ETH_TX_BD_FLAGS_IP_CSUM;
11340 else
ca00392c
EG
11341 tx_start_bd->bd_flags.as_bitfield |=
11342 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11343
11344 if (xmit_type & XMIT_CSUM_TCP) {
11345 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11346
11347 } else {
11348 s8 fix = SKB_CS_OFF(skb); /* signed! */
11349
ca00392c 11350 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11351
755735eb 11352 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11353 "hlen %d fix %d csum before fix %x\n",
11354 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11355
11356 /* HW bug: fixup the CSUM */
11357 pbd->tcp_pseudo_csum =
11358 bnx2x_csum_fix(skb_transport_header(skb),
11359 SKB_CS(skb), fix);
11360
11361 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11362 pbd->tcp_pseudo_csum);
11363 }
a2fbb9ea
ET
11364 }
11365
11366 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11367 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11368
ca00392c
EG
11369 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11370 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11371 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11372 tx_start_bd->nbd = cpu_to_le16(nbd);
11373 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11374 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11375
11376 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11377 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11378 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11379 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11380 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11381
755735eb 11382 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11383
11384 DP(NETIF_MSG_TX_QUEUED,
11385 "TSO packet len %d hlen %d total len %d tso size %d\n",
11386 skb->len, hlen, skb_headlen(skb),
11387 skb_shinfo(skb)->gso_size);
11388
ca00392c 11389 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11390
755735eb 11391 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11392 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11393 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11394
11395 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11396 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11397 pbd->tcp_flags = pbd_tcp_flags(skb);
11398
11399 if (xmit_type & XMIT_GSO_V4) {
11400 pbd->ip_id = swab16(ip_hdr(skb)->id);
11401 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11402 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11403 ip_hdr(skb)->daddr,
11404 0, IPPROTO_TCP, 0));
755735eb
EG
11405
11406 } else
11407 pbd->tcp_pseudo_csum =
11408 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11409 &ipv6_hdr(skb)->daddr,
11410 0, IPPROTO_TCP, 0));
11411
a2fbb9ea
ET
11412 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11413 }
ca00392c 11414 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11415
755735eb
EG
11416 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11417 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11418
755735eb 11419 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11420 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11421 if (total_pkt_bd == NULL)
11422 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11423
755735eb
EG
11424 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11425 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11426
ca00392c
EG
11427 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11428 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11429 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11430 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11431
755735eb 11432 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11433 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11434 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11435 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11436 }
11437
ca00392c 11438 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11439
a2fbb9ea
ET
11440 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11441
755735eb 11442 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11443 * if the packet contains or ends with it
11444 */
11445 if (TX_BD_POFF(bd_prod) < nbd)
11446 nbd++;
11447
ca00392c
EG
11448 if (total_pkt_bd != NULL)
11449 total_pkt_bd->total_pkt_bytes = pkt_size;
11450
a2fbb9ea
ET
11451 if (pbd)
11452 DP(NETIF_MSG_TX_QUEUED,
11453 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11454 " tcp_flags %x xsum %x seq %u hlen %u\n",
11455 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11456 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11457 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11458
755735eb 11459 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11460
58f4c4cf
EG
11461 /*
11462 * Make sure that the BD data is updated before updating the producer
11463 * since FW might read the BD right after the producer is updated.
11464 * This is only applicable for weak-ordered memory model archs such
11465 * as IA-64. The following barrier is also mandatory since FW will
11466 * assumes packets must have BDs.
11467 */
11468 wmb();
11469
ca00392c
EG
11470 fp->tx_db.data.prod += nbd;
11471 barrier();
11472 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11473
11474 mmiowb();
11475
755735eb 11476 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11477
11478 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11479 netif_tx_stop_queue(txq);
58f4c4cf
EG
11480 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11481 if we put Tx into XOFF state. */
11482 smp_mb();
ca00392c 11483 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11484 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11485 netif_tx_wake_queue(txq);
a2fbb9ea 11486 }
ca00392c 11487 fp_stat->tx_pkt++;
a2fbb9ea
ET
11488
11489 return NETDEV_TX_OK;
11490}
11491
bb2a0f7a 11492/* called with rtnl_lock */
a2fbb9ea
ET
11493static int bnx2x_open(struct net_device *dev)
11494{
11495 struct bnx2x *bp = netdev_priv(dev);
11496
6eccabb3
EG
11497 netif_carrier_off(dev);
11498
a2fbb9ea
ET
11499 bnx2x_set_power_state(bp, PCI_D0);
11500
bb2a0f7a 11501 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11502}
11503
bb2a0f7a 11504/* called with rtnl_lock */
a2fbb9ea
ET
11505static int bnx2x_close(struct net_device *dev)
11506{
a2fbb9ea
ET
11507 struct bnx2x *bp = netdev_priv(dev);
11508
11509 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11510 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11511 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11512 if (!CHIP_REV_IS_SLOW(bp))
11513 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11514
11515 return 0;
11516}
11517
f5372251 11518/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11519static void bnx2x_set_rx_mode(struct net_device *dev)
11520{
11521 struct bnx2x *bp = netdev_priv(dev);
11522 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11523 int port = BP_PORT(bp);
11524
11525 if (bp->state != BNX2X_STATE_OPEN) {
11526 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11527 return;
11528 }
11529
11530 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11531
11532 if (dev->flags & IFF_PROMISC)
11533 rx_mode = BNX2X_RX_MODE_PROMISC;
11534
11535 else if ((dev->flags & IFF_ALLMULTI) ||
11536 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11537 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11538
11539 else { /* some multicasts */
11540 if (CHIP_IS_E1(bp)) {
11541 int i, old, offset;
11542 struct dev_mc_list *mclist;
11543 struct mac_configuration_cmd *config =
11544 bnx2x_sp(bp, mcast_config);
11545
11546 for (i = 0, mclist = dev->mc_list;
11547 mclist && (i < dev->mc_count);
11548 i++, mclist = mclist->next) {
11549
11550 config->config_table[i].
11551 cam_entry.msb_mac_addr =
11552 swab16(*(u16 *)&mclist->dmi_addr[0]);
11553 config->config_table[i].
11554 cam_entry.middle_mac_addr =
11555 swab16(*(u16 *)&mclist->dmi_addr[2]);
11556 config->config_table[i].
11557 cam_entry.lsb_mac_addr =
11558 swab16(*(u16 *)&mclist->dmi_addr[4]);
11559 config->config_table[i].cam_entry.flags =
11560 cpu_to_le16(port);
11561 config->config_table[i].
11562 target_table_entry.flags = 0;
ca00392c
EG
11563 config->config_table[i].target_table_entry.
11564 clients_bit_vector =
11565 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11566 config->config_table[i].
11567 target_table_entry.vlan_id = 0;
11568
11569 DP(NETIF_MSG_IFUP,
11570 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11571 config->config_table[i].
11572 cam_entry.msb_mac_addr,
11573 config->config_table[i].
11574 cam_entry.middle_mac_addr,
11575 config->config_table[i].
11576 cam_entry.lsb_mac_addr);
11577 }
8d9c5f34 11578 old = config->hdr.length;
34f80b04
EG
11579 if (old > i) {
11580 for (; i < old; i++) {
11581 if (CAM_IS_INVALID(config->
11582 config_table[i])) {
af246401 11583 /* already invalidated */
34f80b04
EG
11584 break;
11585 }
11586 /* invalidate */
11587 CAM_INVALIDATE(config->
11588 config_table[i]);
11589 }
11590 }
11591
11592 if (CHIP_REV_IS_SLOW(bp))
11593 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11594 else
11595 offset = BNX2X_MAX_MULTICAST*(1 + port);
11596
8d9c5f34 11597 config->hdr.length = i;
34f80b04 11598 config->hdr.offset = offset;
8d9c5f34 11599 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11600 config->hdr.reserved1 = 0;
11601
e665bfda
MC
11602 bp->set_mac_pending++;
11603 smp_wmb();
11604
34f80b04
EG
11605 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11606 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11607 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11608 0);
11609 } else { /* E1H */
11610 /* Accept one or more multicasts */
11611 struct dev_mc_list *mclist;
11612 u32 mc_filter[MC_HASH_SIZE];
11613 u32 crc, bit, regidx;
11614 int i;
11615
11616 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11617
11618 for (i = 0, mclist = dev->mc_list;
11619 mclist && (i < dev->mc_count);
11620 i++, mclist = mclist->next) {
11621
7c510e4b
JB
11622 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11623 mclist->dmi_addr);
34f80b04
EG
11624
11625 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11626 bit = (crc >> 24) & 0xff;
11627 regidx = bit >> 5;
11628 bit &= 0x1f;
11629 mc_filter[regidx] |= (1 << bit);
11630 }
11631
11632 for (i = 0; i < MC_HASH_SIZE; i++)
11633 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11634 mc_filter[i]);
11635 }
11636 }
11637
11638 bp->rx_mode = rx_mode;
11639 bnx2x_set_storm_rx_mode(bp);
11640}
11641
11642/* called with rtnl_lock */
a2fbb9ea
ET
11643static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11644{
11645 struct sockaddr *addr = p;
11646 struct bnx2x *bp = netdev_priv(dev);
11647
34f80b04 11648 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11649 return -EINVAL;
11650
11651 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11652 if (netif_running(dev)) {
11653 if (CHIP_IS_E1(bp))
e665bfda 11654 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11655 else
e665bfda 11656 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11657 }
a2fbb9ea
ET
11658
11659 return 0;
11660}
11661
c18487ee 11662/* called with rtnl_lock */
01cd4528
EG
11663static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11664 int devad, u16 addr)
a2fbb9ea 11665{
01cd4528
EG
11666 struct bnx2x *bp = netdev_priv(netdev);
11667 u16 value;
11668 int rc;
11669 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11670
01cd4528
EG
11671 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11672 prtad, devad, addr);
a2fbb9ea 11673
01cd4528
EG
11674 if (prtad != bp->mdio.prtad) {
11675 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11676 prtad, bp->mdio.prtad);
11677 return -EINVAL;
11678 }
11679
11680 /* The HW expects different devad if CL22 is used */
11681 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11682
01cd4528
EG
11683 bnx2x_acquire_phy_lock(bp);
11684 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11685 devad, addr, &value);
11686 bnx2x_release_phy_lock(bp);
11687 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11688
01cd4528
EG
11689 if (!rc)
11690 rc = value;
11691 return rc;
11692}
a2fbb9ea 11693
01cd4528
EG
11694/* called with rtnl_lock */
11695static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11696 u16 addr, u16 value)
11697{
11698 struct bnx2x *bp = netdev_priv(netdev);
11699 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11700 int rc;
11701
11702 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11703 " value 0x%x\n", prtad, devad, addr, value);
11704
11705 if (prtad != bp->mdio.prtad) {
11706 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11707 prtad, bp->mdio.prtad);
11708 return -EINVAL;
a2fbb9ea
ET
11709 }
11710
01cd4528
EG
11711 /* The HW expects different devad if CL22 is used */
11712 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11713
01cd4528
EG
11714 bnx2x_acquire_phy_lock(bp);
11715 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11716 devad, addr, value);
11717 bnx2x_release_phy_lock(bp);
11718 return rc;
11719}
c18487ee 11720
01cd4528
EG
11721/* called with rtnl_lock */
11722static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11723{
11724 struct bnx2x *bp = netdev_priv(dev);
11725 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11726
01cd4528
EG
11727 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11728 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11729
01cd4528
EG
11730 if (!netif_running(dev))
11731 return -EAGAIN;
11732
11733 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11734}
11735
34f80b04 11736/* called with rtnl_lock */
a2fbb9ea
ET
11737static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11738{
11739 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11740 int rc = 0;
a2fbb9ea
ET
11741
11742 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11743 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11744 return -EINVAL;
11745
11746 /* This does not race with packet allocation
c14423fe 11747 * because the actual alloc size is
a2fbb9ea
ET
11748 * only updated as part of load
11749 */
11750 dev->mtu = new_mtu;
11751
11752 if (netif_running(dev)) {
34f80b04
EG
11753 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11754 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11755 }
34f80b04
EG
11756
11757 return rc;
a2fbb9ea
ET
11758}
11759
11760static void bnx2x_tx_timeout(struct net_device *dev)
11761{
11762 struct bnx2x *bp = netdev_priv(dev);
11763
11764#ifdef BNX2X_STOP_ON_ERROR
11765 if (!bp->panic)
11766 bnx2x_panic();
11767#endif
11768 /* This allows the netif to be shutdown gracefully before resetting */
11769 schedule_work(&bp->reset_task);
11770}
11771
11772#ifdef BCM_VLAN
34f80b04 11773/* called with rtnl_lock */
a2fbb9ea
ET
11774static void bnx2x_vlan_rx_register(struct net_device *dev,
11775 struct vlan_group *vlgrp)
11776{
11777 struct bnx2x *bp = netdev_priv(dev);
11778
11779 bp->vlgrp = vlgrp;
0c6671b0
EG
11780
11781 /* Set flags according to the required capabilities */
11782 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11783
11784 if (dev->features & NETIF_F_HW_VLAN_TX)
11785 bp->flags |= HW_VLAN_TX_FLAG;
11786
11787 if (dev->features & NETIF_F_HW_VLAN_RX)
11788 bp->flags |= HW_VLAN_RX_FLAG;
11789
a2fbb9ea 11790 if (netif_running(dev))
49d66772 11791 bnx2x_set_client_config(bp);
a2fbb9ea 11792}
34f80b04 11793
a2fbb9ea
ET
11794#endif
11795
11796#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11797static void poll_bnx2x(struct net_device *dev)
11798{
11799 struct bnx2x *bp = netdev_priv(dev);
11800
11801 disable_irq(bp->pdev->irq);
11802 bnx2x_interrupt(bp->pdev->irq, dev);
11803 enable_irq(bp->pdev->irq);
11804}
11805#endif
11806
c64213cd
SH
11807static const struct net_device_ops bnx2x_netdev_ops = {
11808 .ndo_open = bnx2x_open,
11809 .ndo_stop = bnx2x_close,
11810 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11811 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11812 .ndo_set_mac_address = bnx2x_change_mac_addr,
11813 .ndo_validate_addr = eth_validate_addr,
11814 .ndo_do_ioctl = bnx2x_ioctl,
11815 .ndo_change_mtu = bnx2x_change_mtu,
11816 .ndo_tx_timeout = bnx2x_tx_timeout,
11817#ifdef BCM_VLAN
11818 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11819#endif
11820#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11821 .ndo_poll_controller = poll_bnx2x,
11822#endif
11823};
11824
34f80b04
EG
11825static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11826 struct net_device *dev)
a2fbb9ea
ET
11827{
11828 struct bnx2x *bp;
11829 int rc;
11830
11831 SET_NETDEV_DEV(dev, &pdev->dev);
11832 bp = netdev_priv(dev);
11833
34f80b04
EG
11834 bp->dev = dev;
11835 bp->pdev = pdev;
a2fbb9ea 11836 bp->flags = 0;
34f80b04 11837 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11838
11839 rc = pci_enable_device(pdev);
11840 if (rc) {
11841 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11842 goto err_out;
11843 }
11844
11845 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11846 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11847 " aborting\n");
11848 rc = -ENODEV;
11849 goto err_out_disable;
11850 }
11851
11852 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11853 printk(KERN_ERR PFX "Cannot find second PCI device"
11854 " base address, aborting\n");
11855 rc = -ENODEV;
11856 goto err_out_disable;
11857 }
11858
34f80b04
EG
11859 if (atomic_read(&pdev->enable_cnt) == 1) {
11860 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11861 if (rc) {
11862 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11863 " aborting\n");
11864 goto err_out_disable;
11865 }
a2fbb9ea 11866
34f80b04
EG
11867 pci_set_master(pdev);
11868 pci_save_state(pdev);
11869 }
a2fbb9ea
ET
11870
11871 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11872 if (bp->pm_cap == 0) {
11873 printk(KERN_ERR PFX "Cannot find power management"
11874 " capability, aborting\n");
11875 rc = -EIO;
11876 goto err_out_release;
11877 }
11878
11879 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11880 if (bp->pcie_cap == 0) {
11881 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11882 " aborting\n");
11883 rc = -EIO;
11884 goto err_out_release;
11885 }
11886
6a35528a 11887 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11888 bp->flags |= USING_DAC_FLAG;
6a35528a 11889 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11890 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11891 " failed, aborting\n");
11892 rc = -EIO;
11893 goto err_out_release;
11894 }
11895
284901a9 11896 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11897 printk(KERN_ERR PFX "System does not support DMA,"
11898 " aborting\n");
11899 rc = -EIO;
11900 goto err_out_release;
11901 }
11902
34f80b04
EG
11903 dev->mem_start = pci_resource_start(pdev, 0);
11904 dev->base_addr = dev->mem_start;
11905 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11906
11907 dev->irq = pdev->irq;
11908
275f165f 11909 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11910 if (!bp->regview) {
11911 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11912 rc = -ENOMEM;
11913 goto err_out_release;
11914 }
11915
34f80b04
EG
11916 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11917 min_t(u64, BNX2X_DB_SIZE,
11918 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11919 if (!bp->doorbells) {
11920 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11921 rc = -ENOMEM;
11922 goto err_out_unmap;
11923 }
11924
11925 bnx2x_set_power_state(bp, PCI_D0);
11926
34f80b04
EG
11927 /* clean indirect addresses */
11928 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11929 PCICFG_VENDOR_ID_OFFSET);
11930 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11931 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11932 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11933 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11934
34f80b04 11935 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11936
c64213cd 11937 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11938 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11939 dev->features |= NETIF_F_SG;
11940 dev->features |= NETIF_F_HW_CSUM;
11941 if (bp->flags & USING_DAC_FLAG)
11942 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11943 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11944 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11945#ifdef BCM_VLAN
11946 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11947 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11948
11949 dev->vlan_features |= NETIF_F_SG;
11950 dev->vlan_features |= NETIF_F_HW_CSUM;
11951 if (bp->flags & USING_DAC_FLAG)
11952 dev->vlan_features |= NETIF_F_HIGHDMA;
11953 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11954 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11955#endif
a2fbb9ea 11956
01cd4528
EG
11957 /* get_port_hwinfo() will set prtad and mmds properly */
11958 bp->mdio.prtad = MDIO_PRTAD_NONE;
11959 bp->mdio.mmds = 0;
11960 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11961 bp->mdio.dev = dev;
11962 bp->mdio.mdio_read = bnx2x_mdio_read;
11963 bp->mdio.mdio_write = bnx2x_mdio_write;
11964
a2fbb9ea
ET
11965 return 0;
11966
11967err_out_unmap:
11968 if (bp->regview) {
11969 iounmap(bp->regview);
11970 bp->regview = NULL;
11971 }
a2fbb9ea
ET
11972 if (bp->doorbells) {
11973 iounmap(bp->doorbells);
11974 bp->doorbells = NULL;
11975 }
11976
11977err_out_release:
34f80b04
EG
11978 if (atomic_read(&pdev->enable_cnt) == 1)
11979 pci_release_regions(pdev);
a2fbb9ea
ET
11980
11981err_out_disable:
11982 pci_disable_device(pdev);
11983 pci_set_drvdata(pdev, NULL);
11984
11985err_out:
11986 return rc;
11987}
11988
37f9ce62
EG
11989static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11990 int *width, int *speed)
25047950
ET
11991{
11992 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11993
37f9ce62 11994 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11995
37f9ce62
EG
11996 /* return value of 1=2.5GHz 2=5GHz */
11997 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11998}
37f9ce62 11999
94a78b79
VZ
12000static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12001{
37f9ce62 12002 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
12003 struct bnx2x_fw_file_hdr *fw_hdr;
12004 struct bnx2x_fw_file_section *sections;
94a78b79 12005 u32 offset, len, num_ops;
37f9ce62 12006 u16 *ops_offsets;
94a78b79 12007 int i;
37f9ce62 12008 const u8 *fw_ver;
94a78b79
VZ
12009
12010 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12011 return -EINVAL;
12012
12013 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12014 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12015
12016 /* Make sure none of the offsets and sizes make us read beyond
12017 * the end of the firmware data */
12018 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12019 offset = be32_to_cpu(sections[i].offset);
12020 len = be32_to_cpu(sections[i].len);
12021 if (offset + len > firmware->size) {
37f9ce62
EG
12022 printk(KERN_ERR PFX "Section %d length is out of "
12023 "bounds\n", i);
94a78b79
VZ
12024 return -EINVAL;
12025 }
12026 }
12027
12028 /* Likewise for the init_ops offsets */
12029 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12030 ops_offsets = (u16 *)(firmware->data + offset);
12031 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12032
12033 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12034 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62
EG
12035 printk(KERN_ERR PFX "Section offset %d is out of "
12036 "bounds\n", i);
94a78b79
VZ
12037 return -EINVAL;
12038 }
12039 }
12040
12041 /* Check FW version */
12042 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12043 fw_ver = firmware->data + offset;
12044 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12045 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12046 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12047 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12048 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12049 " Should be %d.%d.%d.%d\n",
12050 fw_ver[0], fw_ver[1], fw_ver[2],
12051 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12052 BCM_5710_FW_MINOR_VERSION,
12053 BCM_5710_FW_REVISION_VERSION,
12054 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 12055 return -EINVAL;
94a78b79
VZ
12056 }
12057
12058 return 0;
12059}
12060
ab6ad5a4 12061static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12062{
ab6ad5a4
EG
12063 const __be32 *source = (const __be32 *)_source;
12064 u32 *target = (u32 *)_target;
94a78b79 12065 u32 i;
94a78b79
VZ
12066
12067 for (i = 0; i < n/4; i++)
12068 target[i] = be32_to_cpu(source[i]);
12069}
12070
12071/*
12072 Ops array is stored in the following format:
12073 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12074 */
ab6ad5a4 12075static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 12076{
ab6ad5a4
EG
12077 const __be32 *source = (const __be32 *)_source;
12078 struct raw_op *target = (struct raw_op *)_target;
94a78b79 12079 u32 i, j, tmp;
94a78b79 12080
ab6ad5a4 12081 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
12082 tmp = be32_to_cpu(source[j]);
12083 target[i].op = (tmp >> 24) & 0xff;
12084 target[i].offset = tmp & 0xffffff;
12085 target[i].raw_data = be32_to_cpu(source[j+1]);
12086 }
12087}
ab6ad5a4
EG
12088
12089static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12090{
ab6ad5a4
EG
12091 const __be16 *source = (const __be16 *)_source;
12092 u16 *target = (u16 *)_target;
94a78b79 12093 u32 i;
94a78b79
VZ
12094
12095 for (i = 0; i < n/2; i++)
12096 target[i] = be16_to_cpu(source[i]);
12097}
12098
12099#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
ab6ad5a4
EG
12100 do { \
12101 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12102 bp->arr = kmalloc(len, GFP_KERNEL); \
94a78b79 12103 if (!bp->arr) { \
ab6ad5a4
EG
12104 printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12105 "for "#arr"\n", len); \
94a78b79
VZ
12106 goto lbl; \
12107 } \
ab6ad5a4
EG
12108 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12109 (u8 *)bp->arr, len); \
94a78b79
VZ
12110 } while (0)
12111
94a78b79
VZ
12112static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12113{
12114 char fw_file_name[40] = {0};
94a78b79 12115 struct bnx2x_fw_file_hdr *fw_hdr;
ab6ad5a4 12116 int rc, offset;
94a78b79
VZ
12117
12118 /* Create a FW file name */
12119 if (CHIP_IS_E1(bp))
ab6ad5a4 12120 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
94a78b79
VZ
12121 else
12122 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12123
12124 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12125 BCM_5710_FW_MAJOR_VERSION,
ab6ad5a4
EG
12126 BCM_5710_FW_MINOR_VERSION,
12127 BCM_5710_FW_REVISION_VERSION,
12128 BCM_5710_FW_ENGINEERING_VERSION);
94a78b79
VZ
12129
12130 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12131
12132 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12133 if (rc) {
ab6ad5a4
EG
12134 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12135 fw_file_name);
94a78b79
VZ
12136 goto request_firmware_exit;
12137 }
12138
12139 rc = bnx2x_check_firmware(bp);
12140 if (rc) {
12141 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12142 goto request_firmware_exit;
12143 }
12144
12145 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12146
12147 /* Initialize the pointers to the init arrays */
12148 /* Blob */
12149 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12150
12151 /* Opcodes */
12152 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12153
12154 /* Offsets */
ab6ad5a4
EG
12155 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12156 be16_to_cpu_n);
94a78b79
VZ
12157
12158 /* STORMs firmware */
573f2035
EG
12159 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12160 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12161 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12162 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12163 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12164 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12165 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12166 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12167 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12168 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12169 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12170 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12171 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12172 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12173 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12174 be32_to_cpu(fw_hdr->csem_pram_data.offset);
94a78b79
VZ
12175
12176 return 0;
ab6ad5a4 12177
94a78b79
VZ
12178init_offsets_alloc_err:
12179 kfree(bp->init_ops);
12180init_ops_alloc_err:
12181 kfree(bp->init_data);
12182request_firmware_exit:
12183 release_firmware(bp->firmware);
12184
12185 return rc;
12186}
12187
12188
a2fbb9ea
ET
12189static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12190 const struct pci_device_id *ent)
12191{
a2fbb9ea
ET
12192 struct net_device *dev = NULL;
12193 struct bnx2x *bp;
37f9ce62 12194 int pcie_width, pcie_speed;
25047950 12195 int rc;
a2fbb9ea 12196
a2fbb9ea 12197 /* dev zeroed in init_etherdev */
555f6c78 12198 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
12199 if (!dev) {
12200 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 12201 return -ENOMEM;
34f80b04 12202 }
a2fbb9ea 12203
a2fbb9ea
ET
12204 bp = netdev_priv(dev);
12205 bp->msglevel = debug;
12206
df4770de
EG
12207 pci_set_drvdata(pdev, dev);
12208
34f80b04 12209 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
12210 if (rc < 0) {
12211 free_netdev(dev);
12212 return rc;
12213 }
12214
34f80b04 12215 rc = bnx2x_init_bp(bp);
693fc0d1
EG
12216 if (rc)
12217 goto init_one_exit;
12218
94a78b79
VZ
12219 /* Set init arrays */
12220 rc = bnx2x_init_firmware(bp, &pdev->dev);
12221 if (rc) {
12222 printk(KERN_ERR PFX "Error loading firmware\n");
12223 goto init_one_exit;
12224 }
12225
693fc0d1 12226 rc = register_netdev(dev);
34f80b04 12227 if (rc) {
693fc0d1 12228 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
12229 goto init_one_exit;
12230 }
12231
37f9ce62 12232 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
25047950 12233 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 12234 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 12235 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
37f9ce62 12236 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
25047950 12237 dev->base_addr, bp->pdev->irq);
e174961c 12238 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 12239
a2fbb9ea 12240 return 0;
34f80b04
EG
12241
12242init_one_exit:
12243 if (bp->regview)
12244 iounmap(bp->regview);
12245
12246 if (bp->doorbells)
12247 iounmap(bp->doorbells);
12248
12249 free_netdev(dev);
12250
12251 if (atomic_read(&pdev->enable_cnt) == 1)
12252 pci_release_regions(pdev);
12253
12254 pci_disable_device(pdev);
12255 pci_set_drvdata(pdev, NULL);
12256
12257 return rc;
a2fbb9ea
ET
12258}
12259
12260static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12261{
12262 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12263 struct bnx2x *bp;
12264
12265 if (!dev) {
228241eb
ET
12266 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12267 return;
12268 }
228241eb 12269 bp = netdev_priv(dev);
a2fbb9ea 12270
a2fbb9ea
ET
12271 unregister_netdev(dev);
12272
94a78b79
VZ
12273 kfree(bp->init_ops_offsets);
12274 kfree(bp->init_ops);
12275 kfree(bp->init_data);
12276 release_firmware(bp->firmware);
12277
a2fbb9ea
ET
12278 if (bp->regview)
12279 iounmap(bp->regview);
12280
12281 if (bp->doorbells)
12282 iounmap(bp->doorbells);
12283
12284 free_netdev(dev);
34f80b04
EG
12285
12286 if (atomic_read(&pdev->enable_cnt) == 1)
12287 pci_release_regions(pdev);
12288
a2fbb9ea
ET
12289 pci_disable_device(pdev);
12290 pci_set_drvdata(pdev, NULL);
12291}
12292
12293static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12294{
12295 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
12296 struct bnx2x *bp;
12297
34f80b04
EG
12298 if (!dev) {
12299 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12300 return -ENODEV;
12301 }
12302 bp = netdev_priv(dev);
a2fbb9ea 12303
34f80b04 12304 rtnl_lock();
a2fbb9ea 12305
34f80b04 12306 pci_save_state(pdev);
228241eb 12307
34f80b04
EG
12308 if (!netif_running(dev)) {
12309 rtnl_unlock();
12310 return 0;
12311 }
a2fbb9ea
ET
12312
12313 netif_device_detach(dev);
a2fbb9ea 12314
da5a662a 12315 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 12316
a2fbb9ea 12317 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 12318
34f80b04
EG
12319 rtnl_unlock();
12320
a2fbb9ea
ET
12321 return 0;
12322}
12323
12324static int bnx2x_resume(struct pci_dev *pdev)
12325{
12326 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 12327 struct bnx2x *bp;
a2fbb9ea
ET
12328 int rc;
12329
228241eb
ET
12330 if (!dev) {
12331 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12332 return -ENODEV;
12333 }
228241eb 12334 bp = netdev_priv(dev);
a2fbb9ea 12335
34f80b04
EG
12336 rtnl_lock();
12337
228241eb 12338 pci_restore_state(pdev);
34f80b04
EG
12339
12340 if (!netif_running(dev)) {
12341 rtnl_unlock();
12342 return 0;
12343 }
12344
a2fbb9ea
ET
12345 bnx2x_set_power_state(bp, PCI_D0);
12346 netif_device_attach(dev);
12347
da5a662a 12348 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12349
34f80b04
EG
12350 rtnl_unlock();
12351
12352 return rc;
a2fbb9ea
ET
12353}
12354
f8ef6e44
YG
12355static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12356{
12357 int i;
12358
12359 bp->state = BNX2X_STATE_ERROR;
12360
12361 bp->rx_mode = BNX2X_RX_MODE_NONE;
12362
12363 bnx2x_netif_stop(bp, 0);
12364
12365 del_timer_sync(&bp->timer);
12366 bp->stats_state = STATS_STATE_DISABLED;
12367 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12368
12369 /* Release IRQs */
12370 bnx2x_free_irq(bp);
12371
12372 if (CHIP_IS_E1(bp)) {
12373 struct mac_configuration_cmd *config =
12374 bnx2x_sp(bp, mcast_config);
12375
8d9c5f34 12376 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
12377 CAM_INVALIDATE(config->config_table[i]);
12378 }
12379
12380 /* Free SKBs, SGEs, TPA pool and driver internals */
12381 bnx2x_free_skbs(bp);
555f6c78 12382 for_each_rx_queue(bp, i)
f8ef6e44 12383 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 12384 for_each_rx_queue(bp, i)
7cde1c8b 12385 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
12386 bnx2x_free_mem(bp);
12387
12388 bp->state = BNX2X_STATE_CLOSED;
12389
12390 netif_carrier_off(bp->dev);
12391
12392 return 0;
12393}
12394
12395static void bnx2x_eeh_recover(struct bnx2x *bp)
12396{
12397 u32 val;
12398
12399 mutex_init(&bp->port.phy_mutex);
12400
12401 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12402 bp->link_params.shmem_base = bp->common.shmem_base;
12403 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12404
12405 if (!bp->common.shmem_base ||
12406 (bp->common.shmem_base < 0xA0000) ||
12407 (bp->common.shmem_base >= 0xC0000)) {
12408 BNX2X_DEV_INFO("MCP not active\n");
12409 bp->flags |= NO_MCP_FLAG;
12410 return;
12411 }
12412
12413 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12414 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12415 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12416 BNX2X_ERR("BAD MCP validity signature\n");
12417
12418 if (!BP_NOMCP(bp)) {
12419 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12420 & DRV_MSG_SEQ_NUMBER_MASK);
12421 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12422 }
12423}
12424
493adb1f
WX
12425/**
12426 * bnx2x_io_error_detected - called when PCI error is detected
12427 * @pdev: Pointer to PCI device
12428 * @state: The current pci connection state
12429 *
12430 * This function is called after a PCI bus error affecting
12431 * this device has been detected.
12432 */
12433static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12434 pci_channel_state_t state)
12435{
12436 struct net_device *dev = pci_get_drvdata(pdev);
12437 struct bnx2x *bp = netdev_priv(dev);
12438
12439 rtnl_lock();
12440
12441 netif_device_detach(dev);
12442
07ce50e4
DN
12443 if (state == pci_channel_io_perm_failure) {
12444 rtnl_unlock();
12445 return PCI_ERS_RESULT_DISCONNECT;
12446 }
12447
493adb1f 12448 if (netif_running(dev))
f8ef6e44 12449 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
12450
12451 pci_disable_device(pdev);
12452
12453 rtnl_unlock();
12454
12455 /* Request a slot reset */
12456 return PCI_ERS_RESULT_NEED_RESET;
12457}
12458
12459/**
12460 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12461 * @pdev: Pointer to PCI device
12462 *
12463 * Restart the card from scratch, as if from a cold-boot.
12464 */
12465static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12466{
12467 struct net_device *dev = pci_get_drvdata(pdev);
12468 struct bnx2x *bp = netdev_priv(dev);
12469
12470 rtnl_lock();
12471
12472 if (pci_enable_device(pdev)) {
12473 dev_err(&pdev->dev,
12474 "Cannot re-enable PCI device after reset\n");
12475 rtnl_unlock();
12476 return PCI_ERS_RESULT_DISCONNECT;
12477 }
12478
12479 pci_set_master(pdev);
12480 pci_restore_state(pdev);
12481
12482 if (netif_running(dev))
12483 bnx2x_set_power_state(bp, PCI_D0);
12484
12485 rtnl_unlock();
12486
12487 return PCI_ERS_RESULT_RECOVERED;
12488}
12489
12490/**
12491 * bnx2x_io_resume - called when traffic can start flowing again
12492 * @pdev: Pointer to PCI device
12493 *
12494 * This callback is called when the error recovery driver tells us that
12495 * its OK to resume normal operation.
12496 */
12497static void bnx2x_io_resume(struct pci_dev *pdev)
12498{
12499 struct net_device *dev = pci_get_drvdata(pdev);
12500 struct bnx2x *bp = netdev_priv(dev);
12501
12502 rtnl_lock();
12503
f8ef6e44
YG
12504 bnx2x_eeh_recover(bp);
12505
493adb1f 12506 if (netif_running(dev))
f8ef6e44 12507 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12508
12509 netif_device_attach(dev);
12510
12511 rtnl_unlock();
12512}
12513
12514static struct pci_error_handlers bnx2x_err_handler = {
12515 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12516 .slot_reset = bnx2x_io_slot_reset,
12517 .resume = bnx2x_io_resume,
493adb1f
WX
12518};
12519
a2fbb9ea 12520static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
12521 .name = DRV_MODULE_NAME,
12522 .id_table = bnx2x_pci_tbl,
12523 .probe = bnx2x_init_one,
12524 .remove = __devexit_p(bnx2x_remove_one),
12525 .suspend = bnx2x_suspend,
12526 .resume = bnx2x_resume,
12527 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
12528};
12529
12530static int __init bnx2x_init(void)
12531{
dd21ca6d
SG
12532 int ret;
12533
938cf541
EG
12534 printk(KERN_INFO "%s", version);
12535
1cf167f2
EG
12536 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12537 if (bnx2x_wq == NULL) {
12538 printk(KERN_ERR PFX "Cannot create workqueue\n");
12539 return -ENOMEM;
12540 }
12541
dd21ca6d
SG
12542 ret = pci_register_driver(&bnx2x_pci_driver);
12543 if (ret) {
12544 printk(KERN_ERR PFX "Cannot register driver\n");
12545 destroy_workqueue(bnx2x_wq);
12546 }
12547 return ret;
a2fbb9ea
ET
12548}
12549
12550static void __exit bnx2x_cleanup(void)
12551{
12552 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12553
12554 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12555}
12556
12557module_init(bnx2x_init);
12558module_exit(bnx2x_cleanup);
12559
993ac7b5
MC
12560#ifdef BCM_CNIC
12561
12562/* count denotes the number of new completions we have seen */
12563static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12564{
12565 struct eth_spe *spe;
12566
12567#ifdef BNX2X_STOP_ON_ERROR
12568 if (unlikely(bp->panic))
12569 return;
12570#endif
12571
12572 spin_lock_bh(&bp->spq_lock);
12573 bp->cnic_spq_pending -= count;
12574
12575 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12576 bp->cnic_spq_pending++) {
12577
12578 if (!bp->cnic_kwq_pending)
12579 break;
12580
12581 spe = bnx2x_sp_get_next(bp);
12582 *spe = *bp->cnic_kwq_cons;
12583
12584 bp->cnic_kwq_pending--;
12585
12586 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12587 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12588
12589 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12590 bp->cnic_kwq_cons = bp->cnic_kwq;
12591 else
12592 bp->cnic_kwq_cons++;
12593 }
12594 bnx2x_sp_prod_update(bp);
12595 spin_unlock_bh(&bp->spq_lock);
12596}
12597
12598static int bnx2x_cnic_sp_queue(struct net_device *dev,
12599 struct kwqe_16 *kwqes[], u32 count)
12600{
12601 struct bnx2x *bp = netdev_priv(dev);
12602 int i;
12603
12604#ifdef BNX2X_STOP_ON_ERROR
12605 if (unlikely(bp->panic))
12606 return -EIO;
12607#endif
12608
12609 spin_lock_bh(&bp->spq_lock);
12610
12611 for (i = 0; i < count; i++) {
12612 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12613
12614 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12615 break;
12616
12617 *bp->cnic_kwq_prod = *spe;
12618
12619 bp->cnic_kwq_pending++;
12620
12621 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12622 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12623 spe->data.mac_config_addr.hi,
12624 spe->data.mac_config_addr.lo,
12625 bp->cnic_kwq_pending);
12626
12627 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12628 bp->cnic_kwq_prod = bp->cnic_kwq;
12629 else
12630 bp->cnic_kwq_prod++;
12631 }
12632
12633 spin_unlock_bh(&bp->spq_lock);
12634
12635 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12636 bnx2x_cnic_sp_post(bp, 0);
12637
12638 return i;
12639}
12640
12641static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12642{
12643 struct cnic_ops *c_ops;
12644 int rc = 0;
12645
12646 mutex_lock(&bp->cnic_mutex);
12647 c_ops = bp->cnic_ops;
12648 if (c_ops)
12649 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12650 mutex_unlock(&bp->cnic_mutex);
12651
12652 return rc;
12653}
12654
12655static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12656{
12657 struct cnic_ops *c_ops;
12658 int rc = 0;
12659
12660 rcu_read_lock();
12661 c_ops = rcu_dereference(bp->cnic_ops);
12662 if (c_ops)
12663 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12664 rcu_read_unlock();
12665
12666 return rc;
12667}
12668
12669/*
12670 * for commands that have no data
12671 */
12672static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12673{
12674 struct cnic_ctl_info ctl = {0};
12675
12676 ctl.cmd = cmd;
12677
12678 return bnx2x_cnic_ctl_send(bp, &ctl);
12679}
12680
12681static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12682{
12683 struct cnic_ctl_info ctl;
12684
12685 /* first we tell CNIC and only then we count this as a completion */
12686 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12687 ctl.data.comp.cid = cid;
12688
12689 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12690 bnx2x_cnic_sp_post(bp, 1);
12691}
12692
12693static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12694{
12695 struct bnx2x *bp = netdev_priv(dev);
12696 int rc = 0;
12697
12698 switch (ctl->cmd) {
12699 case DRV_CTL_CTXTBL_WR_CMD: {
12700 u32 index = ctl->data.io.offset;
12701 dma_addr_t addr = ctl->data.io.dma_addr;
12702
12703 bnx2x_ilt_wr(bp, index, addr);
12704 break;
12705 }
12706
12707 case DRV_CTL_COMPLETION_CMD: {
12708 int count = ctl->data.comp.comp_count;
12709
12710 bnx2x_cnic_sp_post(bp, count);
12711 break;
12712 }
12713
12714 /* rtnl_lock is held. */
12715 case DRV_CTL_START_L2_CMD: {
12716 u32 cli = ctl->data.ring.client_id;
12717
12718 bp->rx_mode_cl_mask |= (1 << cli);
12719 bnx2x_set_storm_rx_mode(bp);
12720 break;
12721 }
12722
12723 /* rtnl_lock is held. */
12724 case DRV_CTL_STOP_L2_CMD: {
12725 u32 cli = ctl->data.ring.client_id;
12726
12727 bp->rx_mode_cl_mask &= ~(1 << cli);
12728 bnx2x_set_storm_rx_mode(bp);
12729 break;
12730 }
12731
12732 default:
12733 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12734 rc = -EINVAL;
12735 }
12736
12737 return rc;
12738}
12739
12740static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12741{
12742 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12743
12744 if (bp->flags & USING_MSIX_FLAG) {
12745 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12746 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12747 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12748 } else {
12749 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12750 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12751 }
12752 cp->irq_arr[0].status_blk = bp->cnic_sb;
12753 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12754 cp->irq_arr[1].status_blk = bp->def_status_blk;
12755 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12756
12757 cp->num_irq = 2;
12758}
12759
12760static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12761 void *data)
12762{
12763 struct bnx2x *bp = netdev_priv(dev);
12764 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12765
12766 if (ops == NULL)
12767 return -EINVAL;
12768
12769 if (atomic_read(&bp->intr_sem) != 0)
12770 return -EBUSY;
12771
12772 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12773 if (!bp->cnic_kwq)
12774 return -ENOMEM;
12775
12776 bp->cnic_kwq_cons = bp->cnic_kwq;
12777 bp->cnic_kwq_prod = bp->cnic_kwq;
12778 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12779
12780 bp->cnic_spq_pending = 0;
12781 bp->cnic_kwq_pending = 0;
12782
12783 bp->cnic_data = data;
12784
12785 cp->num_irq = 0;
12786 cp->drv_state = CNIC_DRV_STATE_REGD;
12787
12788 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12789
12790 bnx2x_setup_cnic_irq_info(bp);
12791 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12792 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12793 rcu_assign_pointer(bp->cnic_ops, ops);
12794
12795 return 0;
12796}
12797
12798static int bnx2x_unregister_cnic(struct net_device *dev)
12799{
12800 struct bnx2x *bp = netdev_priv(dev);
12801 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12802
12803 mutex_lock(&bp->cnic_mutex);
12804 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12805 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12806 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12807 }
12808 cp->drv_state = 0;
12809 rcu_assign_pointer(bp->cnic_ops, NULL);
12810 mutex_unlock(&bp->cnic_mutex);
12811 synchronize_rcu();
12812 kfree(bp->cnic_kwq);
12813 bp->cnic_kwq = NULL;
12814
12815 return 0;
12816}
12817
12818struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12819{
12820 struct bnx2x *bp = netdev_priv(dev);
12821 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12822
12823 cp->drv_owner = THIS_MODULE;
12824 cp->chip_id = CHIP_ID(bp);
12825 cp->pdev = bp->pdev;
12826 cp->io_base = bp->regview;
12827 cp->io_base2 = bp->doorbells;
12828 cp->max_kwqe_pending = 8;
12829 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12830 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12831 cp->ctx_tbl_len = CNIC_ILT_LINES;
12832 cp->starting_cid = BCM_CNIC_CID_START;
12833 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12834 cp->drv_ctl = bnx2x_drv_ctl;
12835 cp->drv_register_cnic = bnx2x_register_cnic;
12836 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12837
12838 return cp;
12839}
12840EXPORT_SYMBOL(bnx2x_cnic_probe);
12841
12842#endif /* BCM_CNIC */
94a78b79 12843