/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
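
/*
 * The DMAE block has 16 command slots; dmae_reg_go_c[] maps a slot
 * index to its "go" doorbell.  bnx2x_post_dmae() copies the command
 * word by word into the slot's command memory and then rings that
 * doorbell, after which the hardware runs the transfer asynchronously
 * and writes comp_val to comp_addr when it finishes.
 */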

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
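
/*
 * bnx2x_write_dmae() thus follows a simple post-and-poll protocol:
 * take dmae_mutex (one slowpath DMAE transaction at a time), clear the
 * write-back completion word, post the command, and poll the
 * completion word for up to ~200 iterations before declaring a
 * timeout.  Before DMAE is initialized (!bp->dmae_ready) it falls
 * back to slow indirect register writes.
 */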

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
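
/*
 * Worked example for the chunking above: len counts 32-bit words while
 * offset counts bytes, hence the "* 4".  With DMAE_LEN32_WR_MAX of,
 * say, 0x400 words and len = 0x900 words, the loop issues 0x400-word
 * transfers at byte offsets 0 and 0x1000, and the tail call writes the
 * remaining 0x100 words at byte offset 0x2000.
 */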

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
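
/*
 * The return value of bnx2x_update_fpsb_idx() is a small bitmask:
 * bit 0 means the CSTORM (Tx-side) index advanced, bit 1 means the
 * USTORM (Rx-side) index advanced.  A non-zero result tells the
 * caller that the chip wrote a newer status block and there is work
 * to do.
 */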

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
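
/*
 * BD layout assumed by bnx2x_free_tx_pkt(): each packet occupies a
 * start BD (mapped with pci_map_single), a parse BD with no mapping,
 * an optional TSO split-header BD (also unmapped), and one mapped
 * data BD per fragment.  nbd from the start BD counts all of them,
 * which is why the unmapped BDs are skipped before the frag-unmapping
 * loop.
 */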

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
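
/*
 * Example of the accounting above: with tx_ring_size = 4096,
 * prod = 100 and cons = 40, SUB_S16() yields 60 in-flight BDs; adding
 * NUM_TX_RINGS reserves one slot per "next-page" descriptor that can
 * never carry data, so slightly fewer BDs are reported free than a
 * naive prod - cons calculation would give.  SUB_S16 keeps the result
 * correct when the 16-bit producer wraps past the consumer.
 */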

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
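
/*
 * bnx2x_reuse_rx_skb() is the cheap path taken when a packet is
 * dropped or copied: the same skb and DMA mapping simply migrate from
 * the consumer slot to the producer slot, so the only work needed is
 * a dma-sync to hand the buffer back to the device plus a copy of the
 * descriptor; no allocation and no mapping-error check.
 */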

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
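
/*
 * SGE reclamation scheme used above: fp->sge_mask keeps one bit per
 * SGE entry, packed into 64-bit elements.  A bit is cleared when the
 * firmware reports that entry consumed; once every bit in an element
 * is clear the element is recycled: its bits are reset to all-ones
 * and rx_sge_prod is advanced by RX_SGE_MASK_ELEM_SZ entries.  The
 * delta loop stops at the first element that still has uncleared
 * bits, so the producer never runs past an entry still in flight.
 */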

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
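
/*
 * All three Rx producers (BD, CQE, SGE) live in one ustorm structure,
 * so they are pushed to the chip as a sequence of 32-bit stores.  The
 * wmb() before the stores orders buffer setup against the producer
 * update on weakly-ordered architectures, and the mmiowb() afterwards
 * keeps producer updates from different CPUs ordered on the bus.
 */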
1486
a2fbb9ea
ET
1487static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1488{
1489 struct bnx2x *bp = fp->bp;
34f80b04 1490 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1491 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1492 int rx_pkt = 0;
1493
1494#ifdef BNX2X_STOP_ON_ERROR
1495 if (unlikely(bp->panic))
1496 return 0;
1497#endif
1498
34f80b04
EG
1499 /* CQ "next element" is of the size of the regular element,
1500 that's why it's ok here */
a2fbb9ea
ET
1501 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1503 hw_comp_cons++;
1504
1505 bd_cons = fp->rx_bd_cons;
1506 bd_prod = fp->rx_bd_prod;
34f80b04 1507 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1508 sw_comp_cons = fp->rx_comp_cons;
1509 sw_comp_prod = fp->rx_comp_prod;
1510
1511 /* Memory barrier necessary as speculative reads of the rx
1512 * buffer can be ahead of the index in the status block
1513 */
1514 rmb();
1515
1516 DP(NETIF_MSG_RX_STATUS,
1517 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1518 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1519
1520 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1521 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1522 struct sk_buff *skb;
1523 union eth_rx_cqe *cqe;
34f80b04
EG
1524 u8 cqe_fp_flags;
1525 u16 len, pad;
a2fbb9ea
ET
1526
1527 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528 bd_prod = RX_BD(bd_prod);
1529 bd_cons = RX_BD(bd_cons);
1530
619e7a66
EG
1531 /* Prefetch the page containing the BD descriptor
1532 at producer's index. It will be needed when new skb is
1533 allocated */
1534 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535 (&fp->rx_desc_ring[bd_prod])) -
1536 PAGE_SIZE + 1));
1537
a2fbb9ea 1538 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1539 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1540
a2fbb9ea 1541 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1542 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1543 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1544 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1545 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1547
1548 /* is this a slowpath msg? */
34f80b04 1549 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1550 bnx2x_sp_event(fp, cqe);
1551 goto next_cqe;
1552
1553 /* this is an rx packet */
1554 } else {
1555 rx_buf = &fp->rx_buf_ring[bd_cons];
1556 skb = rx_buf->skb;
a2fbb9ea
ET
1557 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558 pad = cqe->fast_path_cqe.placement_offset;
1559
7a9b2557
VZ
1560 /* If CQE is marked both TPA_START and TPA_END
1561 it is a non-TPA CQE */
1562 if ((!fp->disable_tpa) &&
1563 (TPA_TYPE(cqe_fp_flags) !=
1564 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1565 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1566
1567 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568 DP(NETIF_MSG_RX_STATUS,
1569 "calling tpa_start on queue %d\n",
1570 queue);
1571
1572 bnx2x_tpa_start(fp, queue, skb,
1573 bd_cons, bd_prod);
1574 goto next_rx;
1575 }
1576
1577 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578 DP(NETIF_MSG_RX_STATUS,
1579 "calling tpa_stop on queue %d\n",
1580 queue);
1581
1582 if (!BNX2X_RX_SUM_FIX(cqe))
1583 BNX2X_ERR("STOP on none TCP "
1584 "data\n");
1585
1586 /* This is a size of the linear data
1587 on this skb */
1588 len = le16_to_cpu(cqe->fast_path_cqe.
1589 len_on_bd);
1590 bnx2x_tpa_stop(bp, fp, queue, pad,
1591 len, cqe, comp_ring_cons);
1592#ifdef BNX2X_STOP_ON_ERROR
1593 if (bp->panic)
17cb4006 1594 return 0;
7a9b2557
VZ
1595#endif
1596
1597 bnx2x_update_sge_prod(fp,
1598 &cqe->fast_path_cqe);
1599 goto next_cqe;
1600 }
1601 }
1602
a2fbb9ea
ET
1603 pci_dma_sync_single_for_device(bp->pdev,
1604 pci_unmap_addr(rx_buf, mapping),
1605 pad + RX_COPY_THRESH,
1606 PCI_DMA_FROMDEVICE);
1607 prefetch(skb);
1608 prefetch(((char *)(skb)) + 128);
1609
1610 /* is this an error packet? */
34f80b04 1611 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1612 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1613 "ERROR flags %x rx packet %u\n",
1614 cqe_fp_flags, sw_comp_cons);
de832a55 1615 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1616 goto reuse_rx;
1617 }
1618
1619 /* Since we don't have a jumbo ring
1620 * copy small packets if mtu > 1500
1621 */
1622 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623 (len <= RX_COPY_THRESH)) {
1624 struct sk_buff *new_skb;
1625
1626 new_skb = netdev_alloc_skb(bp->dev,
1627 len + pad);
1628 if (new_skb == NULL) {
1629 DP(NETIF_MSG_RX_ERR,
34f80b04 1630 "ERROR packet dropped "
a2fbb9ea 1631 "because of alloc failure\n");
de832a55 1632 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1633 goto reuse_rx;
1634 }
1635
1636 /* aligned copy */
1637 skb_copy_from_linear_data_offset(skb, pad,
1638 new_skb->data + pad, len);
1639 skb_reserve(new_skb, pad);
1640 skb_put(new_skb, len);
1641
1642 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1643
1644 skb = new_skb;
1645
a119a069
EG
1646 } else
1647 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
a2fbb9ea
ET
1648 pci_unmap_single(bp->pdev,
1649 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1650 bp->rx_buf_size,
a2fbb9ea
ET
1651 PCI_DMA_FROMDEVICE);
1652 skb_reserve(skb, pad);
1653 skb_put(skb, len);
1654
1655 } else {
1656 DP(NETIF_MSG_RX_ERR,
34f80b04 1657 "ERROR packet dropped because "
a2fbb9ea 1658 "of alloc failure\n");
de832a55 1659 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1660reuse_rx:
1661 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1662 goto next_rx;
1663 }
1664
1665 skb->protocol = eth_type_trans(skb, bp->dev);
1666
1667 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1668 if (bp->rx_csum) {
1adcd8be
EG
1669 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1671 else
de832a55 1672 fp->eth_q_stats.hw_csum_err++;
66e855f3 1673 }
a2fbb9ea
ET
1674 }
1675
748e5439 1676 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1677
a2fbb9ea 1678#ifdef BCM_VLAN
0c6671b0 1679 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1680 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1682 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1684 else
1685#endif
34f80b04 1686 netif_receive_skb(skb);
a2fbb9ea 1687
a2fbb9ea
ET
1688
1689next_rx:
1690 rx_buf->skb = NULL;
1691
1692 bd_cons = NEXT_RX_IDX(bd_cons);
1693 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1694 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1695 rx_pkt++;
a2fbb9ea
ET
1696next_cqe:
1697 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1699
34f80b04 1700 if (rx_pkt == budget)
a2fbb9ea
ET
1701 break;
1702 } /* while */
1703
1704 fp->rx_bd_cons = bd_cons;
34f80b04 1705 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1706 fp->rx_comp_cons = sw_comp_cons;
1707 fp->rx_comp_prod = sw_comp_prod;
1708
7a9b2557
VZ
1709 /* Update producers */
1710 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1711 fp->rx_sge_prod);
a2fbb9ea
ET
1712
1713 fp->rx_pkt += rx_pkt;
1714 fp->rx_calls++;
1715
1716 return rx_pkt;
1717}
1718
1719static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720{
1721 struct bnx2x_fastpath *fp = fp_cookie;
1722 struct bnx2x *bp = fp->bp;
a2fbb9ea 1723
da5a662a
VZ
1724 /* Return here if interrupt is disabled */
1725 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1727 return IRQ_HANDLED;
1728 }
1729
34f80b04 1730 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1731 fp->index, fp->sb_id);
0626b899 1732 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1733
1734#ifdef BNX2X_STOP_ON_ERROR
1735 if (unlikely(bp->panic))
1736 return IRQ_HANDLED;
1737#endif
ca00392c
EG
1738 /* Handle Rx or Tx according to MSI-X vector */
1739 if (fp->is_rx_queue) {
1740 prefetch(fp->rx_cons_sb);
1741 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1742
ca00392c 1743 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1744
ca00392c
EG
1745 } else {
1746 prefetch(fp->tx_cons_sb);
1747 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748
1749 bnx2x_update_fpsb_idx(fp);
1750 rmb();
1751 bnx2x_tx_int(fp);
1752
1753 /* Re-enable interrupts */
1754 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1758 }
34f80b04 1759
a2fbb9ea
ET
1760 return IRQ_HANDLED;
1761}
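/*
 * Illustrative sketch (not driver code): the NAPI poll side that pairs
 * with the IGU_INT_DISABLE ack above.  The Rx vector stays masked while
 * polling and is unmasked only once the ring is drained.  process_rx()
 * and reenable_sb_interrupt() are hypothetical stand-ins for the
 * driver's ring-drain and IGU-ack helpers.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int done = process_rx(budget);	/* handle up to budget packets */

	if (done < budget) {
		/* ring empty - leave polling mode and unmask the vector */
		napi_complete(napi);
		reenable_sb_interrupt();
	}
	return done;
}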
1762
1763static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1764{
555f6c78 1765 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1766 u16 status = bnx2x_ack_int(bp);
34f80b04 1767 u16 mask;
ca00392c 1768 int i;
a2fbb9ea 1769
34f80b04 1770 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1771 if (unlikely(status == 0)) {
1772 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1773 return IRQ_NONE;
1774 }
f5372251 1775 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1776
34f80b04 1777 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1778 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1780 return IRQ_HANDLED;
1781 }
1782
3196a88a
EG
1783#ifdef BNX2X_STOP_ON_ERROR
1784 if (unlikely(bp->panic))
1785 return IRQ_HANDLED;
1786#endif
1787
ca00392c
EG
1788 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1790
ca00392c
EG
1791 mask = 0x2 << fp->sb_id;
1792 if (status & mask) {
1793 /* Handle Rx or Tx according to SB id */
1794 if (fp->is_rx_queue) {
1795 prefetch(fp->rx_cons_sb);
1796 prefetch(&fp->status_blk->u_status_block.
1797 status_block_index);
a2fbb9ea 1798
ca00392c 1799 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1800
ca00392c
EG
1801 } else {
1802 prefetch(fp->tx_cons_sb);
1803 prefetch(&fp->status_blk->c_status_block.
1804 status_block_index);
1805
1806 bnx2x_update_fpsb_idx(fp);
1807 rmb();
1808 bnx2x_tx_int(fp);
1809
1810 /* Re-enable interrupts */
1811 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812 le16_to_cpu(fp->fp_u_idx),
1813 IGU_INT_NOP, 1);
1814 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815 le16_to_cpu(fp->fp_c_idx),
1816 IGU_INT_ENABLE, 1);
1817 }
1818 status &= ~mask;
1819 }
a2fbb9ea
ET
1820 }
1821
993ac7b5
MC
1822#ifdef BCM_CNIC
1823 mask = 0x2 << CNIC_SB_ID(bp);
1824 if (status & (mask | 0x1)) {
1825 struct cnic_ops *c_ops = NULL;
1826
1827 rcu_read_lock();
1828 c_ops = rcu_dereference(bp->cnic_ops);
1829 if (c_ops)
1830 c_ops->cnic_handler(bp->cnic_data, NULL);
1831 rcu_read_unlock();
1832
1833 status &= ~mask;
1834 }
1835#endif
a2fbb9ea 1836
34f80b04 1837 if (unlikely(status & 0x1)) {
1cf167f2 1838 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1839
1840 status &= ~0x1;
1841 if (!status)
1842 return IRQ_HANDLED;
1843 }
1844
34f80b04
EG
1845 if (status)
1846 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1847 status);
a2fbb9ea 1848
c18487ee 1849 return IRQ_HANDLED;
a2fbb9ea
ET
1850}
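/*
 * Standalone worked example (assumed status value) of the decoding in
 * bnx2x_interrupt() above: bit 0 belongs to the slowpath and each
 * fastpath status block n owns bit n+1, hence the 0x2 << sb_id mask.
 */
#include <stdio.h>

int main(void)
{
	unsigned int status = 0x5;	/* assumed: slowpath + SB 1 */
	int sb_id;

	if (status & 0x1)
		printf("slowpath event\n");
	for (sb_id = 0; sb_id < 16; sb_id++)
		if (status & (0x2 << sb_id))
			printf("fastpath SB %d\n", sb_id);
	return 0;
}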
1851
c18487ee 1852/* end of fast path */
a2fbb9ea 1853
bb2a0f7a 1854static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1855
c18487ee
YR
1856/* Link */
1857
1858/*
1859 * General service functions
1860 */
a2fbb9ea 1861
4a37fb66 1862static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1863{
1864 u32 lock_status;
1865 u32 resource_bit = (1 << resource);
4a37fb66
YG
1866 int func = BP_FUNC(bp);
1867 u32 hw_lock_control_reg;
c18487ee 1868 int cnt;
a2fbb9ea 1869
c18487ee
YR
1870 /* Validating that the resource is within range */
1871 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1872 DP(NETIF_MSG_HW,
1873 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1875 return -EINVAL;
1876 }
a2fbb9ea 1877
4a37fb66
YG
1878 if (func <= 5) {
1879 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1880 } else {
1881 hw_lock_control_reg =
1882 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1883 }
1884
c18487ee 1885 /* Validating that the resource is not already taken */
4a37fb66 1886 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1887 if (lock_status & resource_bit) {
1888 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1889 lock_status, resource_bit);
1890 return -EEXIST;
1891 }
a2fbb9ea 1892
46230476
EG
1893 /* Try for 5 seconds every 5ms */
1894 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1895 /* Try to acquire the lock */
4a37fb66
YG
1896 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1898 if (lock_status & resource_bit)
1899 return 0;
a2fbb9ea 1900
c18487ee 1901 msleep(5);
a2fbb9ea 1902 }
c18487ee
YR
1903 DP(NETIF_MSG_HW, "Timeout\n");
1904 return -EAGAIN;
1905}
a2fbb9ea 1906
4a37fb66 1907static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1908{
1909 u32 lock_status;
1910 u32 resource_bit = (1 << resource);
4a37fb66
YG
1911 int func = BP_FUNC(bp);
1912 u32 hw_lock_control_reg;
a2fbb9ea 1913
c18487ee
YR
1914 /* Validating that the resource is within range */
1915 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1916 DP(NETIF_MSG_HW,
1917 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1919 return -EINVAL;
1920 }
1921
4a37fb66
YG
1922 if (func <= 5) {
1923 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1924 } else {
1925 hw_lock_control_reg =
1926 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1927 }
1928
c18487ee 1929 /* Validating that the resource is currently taken */
4a37fb66 1930 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1931 if (!(lock_status & resource_bit)) {
1932 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1933 lock_status, resource_bit);
1934 return -EFAULT;
a2fbb9ea
ET
1935 }
1936
4a37fb66 1937 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1938 return 0;
1939}
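/*
 * Sketch of the typical caller pattern for the acquire/release pair
 * above; the body is a placeholder and HW_LOCK_RESOURCE_MDIO is one of
 * the real resource ids (compare bnx2x_acquire_phy_lock() just below).
 */
static int example_locked_access(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	if (rc)		/* -EINVAL, -EEXIST or -EAGAIN on timeout */
		return rc;

	/* ... access the resource shared with the MCP/other driver ... */

	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}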
1940
1941/* HW Lock for shared dual port PHYs */
4a37fb66 1942static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1943{
34f80b04 1944 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1945
46c6a674
EG
1946 if (bp->port.need_hw_lock)
1947 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1948}
a2fbb9ea 1949
4a37fb66 1950static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1951{
46c6a674
EG
1952 if (bp->port.need_hw_lock)
1953 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1954
34f80b04 1955 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1956}
a2fbb9ea 1957
4acac6a5
EG
1958int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1959{
1960 /* The GPIO should be swapped if swap register is set and active */
1961 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963 int gpio_shift = gpio_num +
1964 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965 u32 gpio_mask = (1 << gpio_shift);
1966 u32 gpio_reg;
1967 int value;
1968
1969 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971 return -EINVAL;
1972 }
1973
1974 /* read GPIO value */
1975 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1976
1977 /* get the requested pin value */
1978 if ((gpio_reg & gpio_mask) == gpio_mask)
1979 value = 1;
1980 else
1981 value = 0;
1982
1983 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1984
1985 return value;
1986}
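/*
 * Standalone illustration (assumed register readings) of the port-swap
 * XOR used above: when both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE
 * read non-zero, port 0 looks up its GPIOs at the port 1 positions and
 * vice versa.
 */
#include <stdio.h>

int main(void)
{
	unsigned int swap = 1, override = 1;	/* assumed readings */
	int port;

	for (port = 0; port <= 1; port++)
		printf("port %d -> gpio_port %d\n",
		       port, (swap && override) ^ port);
	return 0;	/* prints 0 -> 1 and 1 -> 0 */
}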
1987
17de50b7 1988int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1989{
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
a2fbb9ea 1997
c18487ee
YR
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
a2fbb9ea 2002
4a37fb66 2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2004 /* read GPIO and mask except the float bits */
2005 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2006
c18487ee
YR
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010 gpio_num, gpio_shift);
2011 /* clear FLOAT and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2014 break;
a2fbb9ea 2015
c18487ee
YR
2016 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018 gpio_num, gpio_shift);
2019 /* clear FLOAT and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2022 break;
a2fbb9ea 2023
17de50b7 2024 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2025 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026 gpio_num, gpio_shift);
2027 /* set FLOAT */
2028 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2029 break;
a2fbb9ea 2030
c18487ee
YR
2031 default:
2032 break;
a2fbb9ea
ET
2033 }
2034
c18487ee 2035 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2036 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2037
c18487ee 2038 return 0;
a2fbb9ea
ET
2039}
2040
4acac6a5
EG
2041int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042{
2043 /* The GPIO should be swapped if swap register is set and active */
2044 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046 int gpio_shift = gpio_num +
2047 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048 u32 gpio_mask = (1 << gpio_shift);
2049 u32 gpio_reg;
2050
2051 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053 return -EINVAL;
2054 }
2055
2056 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057 /* read GPIO int */
2058 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2059
2060 switch (mode) {
2061 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063 "output low\n", gpio_num, gpio_shift);
2064 /* clear SET and set CLR */
2065 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2067 break;
2068
2069 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071 "output high\n", gpio_num, gpio_shift);
2072 /* clear CLR and set SET */
2073 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2075 break;
2076
2077 default:
2078 break;
2079 }
2080
2081 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2083
2084 return 0;
2085}
2086
c18487ee 2087static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2088{
c18487ee
YR
2089 u32 spio_mask = (1 << spio_num);
2090 u32 spio_reg;
a2fbb9ea 2091
c18487ee
YR
2092 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093 (spio_num > MISC_REGISTERS_SPIO_7)) {
2094 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2095 return -EINVAL;
a2fbb9ea
ET
2096 }
2097
4a37fb66 2098 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2099 /* read SPIO and mask except the float bits */
2100 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2101
c18487ee 2102 switch (mode) {
6378c025 2103 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2104 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105 /* clear FLOAT and set CLR */
2106 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2108 break;
a2fbb9ea 2109
6378c025 2110 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2111 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112 /* clear FLOAT and set SET */
2113 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2115 break;
a2fbb9ea 2116
c18487ee
YR
2117 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2119 /* set FLOAT */
2120 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2121 break;
a2fbb9ea 2122
c18487ee
YR
2123 default:
2124 break;
a2fbb9ea
ET
2125 }
2126
c18487ee 2127 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2128 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2129
a2fbb9ea
ET
2130 return 0;
2131}
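/*
 * The GPIO/SPIO setters above share one read-modify-write idiom: mask
 * off the pin's FLOAT bit, then raise SET or CLR, and write back under
 * the HW lock.  A minimal standalone sketch (field positions are
 * hypothetical, not the real MISC_REGISTERS_* values):
 */
static unsigned int example_drive_pin_low(unsigned int reg,
					  unsigned int pin_mask,
					  int float_pos, int clr_pos)
{
	reg &= ~(pin_mask << float_pos);	/* stop floating the pin */
	reg |= (pin_mask << clr_pos);		/* request output-low */
	return reg;
}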
2132
c18487ee 2133static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2134{
ad33ea3a
EG
2135 switch (bp->link_vars.ieee_fc &
2136 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2137 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2138 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2139 ADVERTISED_Pause);
2140 break;
356e2385 2141
c18487ee 2142 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2143 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2144 ADVERTISED_Pause);
2145 break;
356e2385 2146
c18487ee 2147 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2148 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2149 break;
356e2385 2150
c18487ee 2151 default:
34f80b04 2152 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2153 ADVERTISED_Pause);
2154 break;
2155 }
2156}
f1410647 2157
c18487ee
YR
2158static void bnx2x_link_report(struct bnx2x *bp)
2159{
2691d51d
EG
2160 if (bp->state == BNX2X_STATE_DISABLED) {
2161 netif_carrier_off(bp->dev);
2162 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2163 return;
2164 }
2165
c18487ee
YR
2166 if (bp->link_vars.link_up) {
2167 if (bp->state == BNX2X_STATE_OPEN)
2168 netif_carrier_on(bp->dev);
2169 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2170
c18487ee 2171 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2172
c18487ee
YR
2173 if (bp->link_vars.duplex == DUPLEX_FULL)
2174 printk("full duplex");
2175 else
2176 printk("half duplex");
f1410647 2177
c0700f90
DM
2178 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2180 printk(", receive ");
356e2385
EG
2181 if (bp->link_vars.flow_ctrl &
2182 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2183 printk("& transmit ");
2184 } else {
2185 printk(", transmit ");
2186 }
2187 printk("flow control ON");
2188 }
2189 printk("\n");
f1410647 2190
c18487ee
YR
2191 } else { /* link_down */
2192 netif_carrier_off(bp->dev);
2193 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2194 }
c18487ee
YR
2195}
2196
b5bf9068 2197static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2198{
19680c48
EG
2199 if (!BP_NOMCP(bp)) {
2200 u8 rc;
a2fbb9ea 2201
19680c48 2202 /* Initialize link parameters structure variables */
8c99e7b0
YR
2203 /* It is recommended to turn off Rx flow control for jumbo
2204 frames to get better performance */
0c593270 2205 if (bp->dev->mtu > 5000)
c0700f90 2206 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2207 else
c0700f90 2208 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2209
4a37fb66 2210 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2211
2212 if (load_mode == LOAD_DIAG)
2213 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2214
19680c48 2215 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2216
4a37fb66 2217 bnx2x_release_phy_lock(bp);
a2fbb9ea 2218
3c96c68b
EG
2219 bnx2x_calc_fc_adv(bp);
2220
b5bf9068
EG
2221 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2223 bnx2x_link_report(bp);
b5bf9068 2224 }
34f80b04 2225
19680c48
EG
2226 return rc;
2227 }
f5372251 2228 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2229 return -EINVAL;
a2fbb9ea
ET
2230}
2231
c18487ee 2232static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2233{
19680c48 2234 if (!BP_NOMCP(bp)) {
4a37fb66 2235 bnx2x_acquire_phy_lock(bp);
19680c48 2236 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2237 bnx2x_release_phy_lock(bp);
a2fbb9ea 2238
19680c48
EG
2239 bnx2x_calc_fc_adv(bp);
2240 } else
f5372251 2241 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2242}
a2fbb9ea 2243
c18487ee
YR
2244static void bnx2x__link_reset(struct bnx2x *bp)
2245{
19680c48 2246 if (!BP_NOMCP(bp)) {
4a37fb66 2247 bnx2x_acquire_phy_lock(bp);
589abe3a 2248 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2249 bnx2x_release_phy_lock(bp);
19680c48 2250 } else
f5372251 2251 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2252}
a2fbb9ea 2253
c18487ee
YR
2254static u8 bnx2x_link_test(struct bnx2x *bp)
2255{
2256 u8 rc;
a2fbb9ea 2257
4a37fb66 2258 bnx2x_acquire_phy_lock(bp);
c18487ee 2259 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2260 bnx2x_release_phy_lock(bp);
a2fbb9ea 2261
c18487ee
YR
2262 return rc;
2263}
a2fbb9ea 2264
8a1c38d1 2265static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2266{
8a1c38d1
EG
2267 u32 r_param = bp->link_vars.line_speed / 8;
2268 u32 fair_periodic_timeout_usec;
2269 u32 t_fair;
34f80b04 2270
8a1c38d1
EG
2271 memset(&(bp->cmng.rs_vars), 0,
2272 sizeof(struct rate_shaping_vars_per_port));
2273 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2274
8a1c38d1
EG
2275 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2277
8a1c38d1
EG
2278 /* this is the threshold below which no timer arming will occur;
2279 the 1.25 coefficient makes the threshold a little bigger than
2280 the real time, to compensate for timer inaccuracy */
2281 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2282 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2283
8a1c38d1
EG
2284 /* resolution of fairness timer */
2285 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2287 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2288
8a1c38d1
EG
2289 /* this is the threshold below which we won't arm the timer anymore */
2290 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2291
8a1c38d1
EG
2292 /* we multiply by 1e3/8 to get bytes/msec.
2293 We don't want the credits to exceed
2294 t_fair*FAIR_MEM (the algorithm resolution) */
2295 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296 /* since each tick is 4 usec */
2297 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2298}
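/*
 * Worked numbers for the timing math above on a 10G link (standalone
 * sketch; the t_fair_coef value is an assumption chosen to match the
 * "for 10G it is 1000usec" comment, not taken from the headers).
 */
#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 10000;		/* Mbps */
	unsigned int r_param = line_speed / 8;		/* 1250 bytes/usec */
	unsigned int t_fair_coef = 10000000;		/* assumed */
	unsigned int t_fair = t_fair_coef / line_speed;	/* 1000 usec */

	printf("r_param=%u bytes/usec t_fair=%u usec (%u SDM ticks)\n",
	       r_param, t_fair, t_fair / 4);	/* one tick is 4 usec */
	return 0;
}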
2299
2691d51d
EG
2300/* Calculates the sum of vn_min_rates.
2301 It's needed for further normalizing of the min_rates.
2302 Returns:
2303 sum of vn_min_rates.
2304 or
2305 0 - if all the min_rates are 0.
2306 In the latter case the fairness algorithm should be deactivated.
2307 If not all min_rates are zero then those that are zero will be set to 1.
2308 */
2309static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2310{
2311 int all_zero = 1;
2312 int port = BP_PORT(bp);
2313 int vn;
2314
2315 bp->vn_weight_sum = 0;
2316 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317 int func = 2*vn + port;
2318 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2321
2322 /* Skip hidden vns */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2324 continue;
2325
2326 /* If min rate is zero - set it to 1 */
2327 if (!vn_min_rate)
2328 vn_min_rate = DEF_MIN_RATE;
2329 else
2330 all_zero = 0;
2331
2332 bp->vn_weight_sum += vn_min_rate;
2333 }
2334
2335 /* ... only if all min rates are zero - disable fairness */
2336 if (all_zero)
2337 bp->vn_weight_sum = 0;
2338}
2339
8a1c38d1 2340static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2341{
2342 struct rate_shaping_vars_per_vn m_rs_vn;
2343 struct fairness_vars_per_vn m_fair_vn;
2344 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2345 u16 vn_min_rate, vn_max_rate;
2346 int i;
2347
2348 /* If function is hidden - set min and max to zeroes */
2349 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2350 vn_min_rate = 0;
2351 vn_max_rate = 0;
2352
2353 } else {
2354 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2355 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2356 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2357 if current min rate is zero - set it to 1.
33471629 2358 This is a requirement of the algorithm. */
8a1c38d1 2359 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2360 vn_min_rate = DEF_MIN_RATE;
2361 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2362 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2363 }
2364
8a1c38d1
EG
2365 DP(NETIF_MSG_IFUP,
2366 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2367 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2368
2369 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2370 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2371
2372 /* global vn counter - maximal Mbps for this vn */
2373 m_rs_vn.vn_counter.rate = vn_max_rate;
2374
2375 /* quota - number of bytes transmitted in this period */
2376 m_rs_vn.vn_counter.quota =
2377 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2378
8a1c38d1 2379 if (bp->vn_weight_sum) {
34f80b04
EG
2380 /* credit for each period of the fairness algorithm:
2381 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2382 vn_weight_sum should not be larger than 10000, thus
2383 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2384 than zero */
34f80b04 2385 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2386 max((u32)(vn_min_rate * (T_FAIR_COEF /
2387 (8 * bp->vn_weight_sum))),
2388 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2389 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2390 m_fair_vn.vn_credit_delta);
2391 }
2392
34f80b04
EG
2393 /* Store it to internal memory */
2394 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2395 REG_WR(bp, BAR_XSTRORM_INTMEM +
2396 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2397 ((u32 *)(&m_rs_vn))[i]);
2398
2399 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2400 REG_WR(bp, BAR_XSTRORM_INTMEM +
2401 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2402 ((u32 *)(&m_fair_vn))[i]);
2403}
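/*
 * Worked example of the vn quota computed above (values assumed): a vn
 * capped at 2500 Mbps with a 100 usec rate-shaping period may transmit
 * 2500 * 100 / 8 = 31250 bytes per period.
 */
#include <stdio.h>

int main(void)
{
	unsigned int vn_max_rate = 2500;	/* Mbps, assumed */
	unsigned int period = 100;		/* RS_PERIODIC_TIMEOUT_USEC, assumed */

	/* Mbit/s equals bit/usec, so rate * usec / 8 yields bytes */
	printf("quota = %u bytes/period\n", vn_max_rate * period / 8);
	return 0;
}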
2404
8a1c38d1 2405
c18487ee
YR
2406/* This function is called upon link interrupt */
2407static void bnx2x_link_attn(struct bnx2x *bp)
2408{
bb2a0f7a
YG
2409 /* Make sure that we are synced with the current statistics */
2410 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2411
c18487ee 2412 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2413
bb2a0f7a
YG
2414 if (bp->link_vars.link_up) {
2415
1c06328c 2416 /* dropless flow control */
a18f5128 2417 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2418 int port = BP_PORT(bp);
2419 u32 pause_enabled = 0;
2420
2421 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2422 pause_enabled = 1;
2423
2424 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2425 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2426 pause_enabled);
2427 }
2428
bb2a0f7a
YG
2429 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2430 struct host_port_stats *pstats;
2431
2432 pstats = bnx2x_sp(bp, port_stats);
2433 /* reset old bmac stats */
2434 memset(&(pstats->mac_stx[0]), 0,
2435 sizeof(struct mac_stx));
2436 }
2437 if ((bp->state == BNX2X_STATE_OPEN) ||
2438 (bp->state == BNX2X_STATE_DISABLED))
2439 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2440 }
2441
c18487ee
YR
2442 /* indicate link status */
2443 bnx2x_link_report(bp);
34f80b04
EG
2444
2445 if (IS_E1HMF(bp)) {
8a1c38d1 2446 int port = BP_PORT(bp);
34f80b04 2447 int func;
8a1c38d1 2448 int vn;
34f80b04 2449
ab6ad5a4 2450 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2451 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2452 if (vn == BP_E1HVN(bp))
2453 continue;
2454
8a1c38d1 2455 func = ((vn << 1) | port);
34f80b04
EG
2456 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2457 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2458 }
34f80b04 2459
8a1c38d1
EG
2460 if (bp->link_vars.link_up) {
2461 int i;
2462
2463 /* Init rate shaping and fairness contexts */
2464 bnx2x_init_port_minmax(bp);
34f80b04 2465
34f80b04 2466 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2467 bnx2x_init_vn_minmax(bp, 2*vn + port);
2468
2469 /* Store it to internal memory */
2470 for (i = 0;
2471 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2472 REG_WR(bp, BAR_XSTRORM_INTMEM +
2473 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2474 ((u32 *)(&bp->cmng))[i]);
2475 }
34f80b04 2476 }
c18487ee 2477}
a2fbb9ea 2478
c18487ee
YR
2479static void bnx2x__link_status_update(struct bnx2x *bp)
2480{
2691d51d
EG
2481 int func = BP_FUNC(bp);
2482
c18487ee
YR
2483 if (bp->state != BNX2X_STATE_OPEN)
2484 return;
a2fbb9ea 2485
c18487ee 2486 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2487
bb2a0f7a
YG
2488 if (bp->link_vars.link_up)
2489 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2490 else
2491 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2492
2691d51d
EG
2493 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2494 bnx2x_calc_vn_weight_sum(bp);
2495
c18487ee
YR
2496 /* indicate link status */
2497 bnx2x_link_report(bp);
a2fbb9ea 2498}
a2fbb9ea 2499
34f80b04
EG
2500static void bnx2x_pmf_update(struct bnx2x *bp)
2501{
2502 int port = BP_PORT(bp);
2503 u32 val;
2504
2505 bp->port.pmf = 1;
2506 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2507
2508 /* enable nig attention */
2509 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2510 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2511 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2512
2513 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2514}
2515
c18487ee 2516/* end of Link */
a2fbb9ea
ET
2517
2518/* slow path */
2519
2520/*
2521 * General service functions
2522 */
2523
2691d51d
EG
2524/* send the MCP a request, block until there is a reply */
2525u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2526{
2527 int func = BP_FUNC(bp);
2528 u32 seq = ++bp->fw_seq;
2529 u32 rc = 0;
2530 u32 cnt = 1;
2531 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2532
2533 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2534 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2535
2536 do {
2537 /* let the FW do its magic ... */
2538 msleep(delay);
2539
2540 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2541
2542 /* Give the FW up to 2 seconds (200*10ms) */
2543 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2544
2545 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2546 cnt*delay, rc, seq);
2547
2548 /* is this a reply to our command? */
2549 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2550 rc &= FW_MSG_CODE_MASK;
2551 else {
2552 /* FW BUG! */
2553 BNX2X_ERR("FW failed to respond!\n");
2554 bnx2x_fw_dump(bp);
2555 rc = 0;
2556 }
2557
2558 return rc;
2559}
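/*
 * Minimal sketch of the mailbox handshake above: each command carries
 * an incrementing sequence number and the driver polls until the FW
 * echoes it back.  shmem_wr()/shmem_rd()/sleep_ms() and the mailbox
 * ids are hypothetical stand-ins for SHMEM_WR/SHMEM_RD/msleep; the
 * masks are the real ones used above.
 */
static unsigned int example_mb_command(unsigned int command,
				       unsigned int *seq_counter)
{
	unsigned int seq = ++(*seq_counter);
	unsigned int rc, cnt = 0;

	shmem_wr(DRV_MB_HEADER, command | seq);
	do {
		sleep_ms(10);
		rc = shmem_rd(FW_MB_HEADER);
	} while (((rc & FW_MSG_SEQ_NUMBER_MASK) != seq) && (++cnt < 200));

	if ((rc & FW_MSG_SEQ_NUMBER_MASK) == seq)
		return rc & FW_MSG_CODE_MASK;
	return 0;	/* FW failed to respond */
}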
2560
2561static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2562static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2563static void bnx2x_set_rx_mode(struct net_device *dev);
2564
2565static void bnx2x_e1h_disable(struct bnx2x *bp)
2566{
2567 int port = BP_PORT(bp);
2568 int i;
2569
2570 bp->rx_mode = BNX2X_RX_MODE_NONE;
2571 bnx2x_set_storm_rx_mode(bp);
2572
2573 netif_tx_disable(bp->dev);
2574 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2575
2576 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2577
e665bfda 2578 bnx2x_set_eth_mac_addr_e1h(bp, 0);
2691d51d
EG
2579
2580 for (i = 0; i < MC_HASH_SIZE; i++)
2581 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2582
2583 netif_carrier_off(bp->dev);
2584}
2585
2586static void bnx2x_e1h_enable(struct bnx2x *bp)
2587{
2588 int port = BP_PORT(bp);
2589
2590 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2591
e665bfda 2592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
2691d51d
EG
2593
2594 /* Only the Tx queues need to be re-enabled */
2595 netif_tx_wake_all_queues(bp->dev);
2596
2597 /* Initialize the receive filter. */
2598 bnx2x_set_rx_mode(bp->dev);
2599}
2600
2601static void bnx2x_update_min_max(struct bnx2x *bp)
2602{
2603 int port = BP_PORT(bp);
2604 int vn, i;
2605
2606 /* Init rate shaping and fairness contexts */
2607 bnx2x_init_port_minmax(bp);
2608
2609 bnx2x_calc_vn_weight_sum(bp);
2610
2611 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2612 bnx2x_init_vn_minmax(bp, 2*vn + port);
2613
2614 if (bp->port.pmf) {
2615 int func;
2616
2617 /* Set the attention towards other drivers on the same port */
2618 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2619 if (vn == BP_E1HVN(bp))
2620 continue;
2621
2622 func = ((vn << 1) | port);
2623 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2624 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2625 }
2626
2627 /* Store it to internal memory */
2628 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2629 REG_WR(bp, BAR_XSTRORM_INTMEM +
2630 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2631 ((u32 *)(&bp->cmng))[i]);
2632 }
2633}
2634
2635static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2636{
2637 int func = BP_FUNC(bp);
2638
2639 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2640 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2641
2642 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2643
2644 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2645 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2646 bp->state = BNX2X_STATE_DISABLED;
2647
2648 bnx2x_e1h_disable(bp);
2649 } else {
2650 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2651 bp->state = BNX2X_STATE_OPEN;
2652
2653 bnx2x_e1h_enable(bp);
2654 }
2655 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2656 }
2657 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2658
2659 bnx2x_update_min_max(bp);
2660 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2661 }
2662
2663 /* Report results to MCP */
2664 if (dcc_event)
2665 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2666 else
2667 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2668}
2669
28912902
MC
2670/* must be called under the spq lock */
2671static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2672{
2673 struct eth_spe *next_spe = bp->spq_prod_bd;
2674
2675 if (bp->spq_prod_bd == bp->spq_last_bd) {
2676 bp->spq_prod_bd = bp->spq;
2677 bp->spq_prod_idx = 0;
2678 DP(NETIF_MSG_TIMER, "end of spq\n");
2679 } else {
2680 bp->spq_prod_bd++;
2681 bp->spq_prod_idx++;
2682 }
2683 return next_spe;
2684}
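/*
 * The branch above is a standard bounded-ring producer step; the same
 * idea as an index-based standalone sketch (RING_SIZE hypothetical):
 */
static int example_ring_advance(int prod)
{
	return (prod + 1 == RING_SIZE) ? 0 : prod + 1;
}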
2685
2686/* must be called under the spq lock */
2687static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2688{
2689 int func = BP_FUNC(bp);
2690
2691 /* Make sure that BD data is updated before writing the producer */
2692 wmb();
2693
2694 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2695 bp->spq_prod_idx);
2696 mmiowb();
2697}
2698
a2fbb9ea
ET
2699/* the slow path queue is odd since completions arrive on the fastpath ring */
2700static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2701 u32 data_hi, u32 data_lo, int common)
2702{
28912902 2703 struct eth_spe *spe;
a2fbb9ea 2704
34f80b04
EG
2705 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2706 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2707 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2708 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2709 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2710
2711#ifdef BNX2X_STOP_ON_ERROR
2712 if (unlikely(bp->panic))
2713 return -EIO;
2714#endif
2715
34f80b04 2716 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2717
2718 if (!bp->spq_left) {
2719 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2720 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2721 bnx2x_panic();
2722 return -EBUSY;
2723 }
f1410647 2724
28912902
MC
2725 spe = bnx2x_sp_get_next(bp);
2726
a2fbb9ea 2727 /* CID needs port number to be encoded int it */
28912902 2728 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2729 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2730 HW_CID(bp, cid)));
28912902 2731 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2732 if (common)
28912902 2733 spe->hdr.type |=
a2fbb9ea
ET
2734 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2735
28912902
MC
2736 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2737 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2738
2739 bp->spq_left--;
2740
28912902 2741 bnx2x_sp_prod_update(bp);
34f80b04 2742 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2743 return 0;
2744}
2745
2746/* acquire split MCP access lock register */
4a37fb66 2747static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2748{
a2fbb9ea 2749 u32 i, j, val;
34f80b04 2750 int rc = 0;
a2fbb9ea
ET
2751
2752 might_sleep();
2753 i = 100;
2754 for (j = 0; j < i*10; j++) {
2755 val = (1UL << 31);
2756 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2757 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2758 if (val & (1L << 31))
2759 break;
2760
2761 msleep(5);
2762 }
a2fbb9ea 2763 if (!(val & (1L << 31))) {
19680c48 2764 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2765 rc = -EBUSY;
2766 }
2767
2768 return rc;
2769}
2770
4a37fb66
YG
2771/* release split MCP access lock register */
2772static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2773{
2774 u32 val = 0;
2775
2776 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2777}
2778
2779static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2780{
2781 struct host_def_status_block *def_sb = bp->def_status_blk;
2782 u16 rc = 0;
2783
2784 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2785 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2786 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2787 rc |= 1;
2788 }
2789 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2790 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2791 rc |= 2;
2792 }
2793 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2794 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2795 rc |= 4;
2796 }
2797 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2798 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2799 rc |= 8;
2800 }
2801 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2802 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2803 rc |= 16;
2804 }
2805 return rc;
2806}
2807
2808/*
2809 * slow path service functions
2810 */
2811
2812static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2813{
34f80b04 2814 int port = BP_PORT(bp);
5c862848
EG
2815 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2816 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2817 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2818 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2819 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2820 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2821 u32 aeu_mask;
87942b46 2822 u32 nig_mask = 0;
a2fbb9ea 2823
a2fbb9ea
ET
2824 if (bp->attn_state & asserted)
2825 BNX2X_ERR("IGU ERROR\n");
2826
3fcaf2e5
EG
2827 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2828 aeu_mask = REG_RD(bp, aeu_addr);
2829
a2fbb9ea 2830 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2831 aeu_mask, asserted);
2832 aeu_mask &= ~(asserted & 0xff);
2833 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2834
3fcaf2e5
EG
2835 REG_WR(bp, aeu_addr, aeu_mask);
2836 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2837
3fcaf2e5 2838 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2839 bp->attn_state |= asserted;
3fcaf2e5 2840 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2841
2842 if (asserted & ATTN_HARD_WIRED_MASK) {
2843 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2844
a5e9a7cf
EG
2845 bnx2x_acquire_phy_lock(bp);
2846
877e9aa4 2847 /* save nig interrupt mask */
87942b46 2848 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2849 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2850
c18487ee 2851 bnx2x_link_attn(bp);
a2fbb9ea
ET
2852
2853 /* handle unicore attn? */
2854 }
2855 if (asserted & ATTN_SW_TIMER_4_FUNC)
2856 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2857
2858 if (asserted & GPIO_2_FUNC)
2859 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2860
2861 if (asserted & GPIO_3_FUNC)
2862 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2863
2864 if (asserted & GPIO_4_FUNC)
2865 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2866
2867 if (port == 0) {
2868 if (asserted & ATTN_GENERAL_ATTN_1) {
2869 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2870 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2871 }
2872 if (asserted & ATTN_GENERAL_ATTN_2) {
2873 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2874 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2875 }
2876 if (asserted & ATTN_GENERAL_ATTN_3) {
2877 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2878 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2879 }
2880 } else {
2881 if (asserted & ATTN_GENERAL_ATTN_4) {
2882 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2883 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2884 }
2885 if (asserted & ATTN_GENERAL_ATTN_5) {
2886 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2887 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2888 }
2889 if (asserted & ATTN_GENERAL_ATTN_6) {
2890 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2891 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2892 }
2893 }
2894
2895 } /* if hardwired */
2896
5c862848
EG
2897 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2898 asserted, hc_addr);
2899 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2900
2901 /* now set back the mask */
a5e9a7cf 2902 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2903 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2904 bnx2x_release_phy_lock(bp);
2905 }
a2fbb9ea
ET
2906}
2907
fd4ef40d
EG
2908static inline void bnx2x_fan_failure(struct bnx2x *bp)
2909{
2910 int port = BP_PORT(bp);
2911
2912 /* mark the failure */
2913 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2914 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2915 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2916 bp->link_params.ext_phy_config);
2917
2918 /* log the failure */
2919 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2920 " the driver to shutdown the card to prevent permanent"
2921 " damage. Please contact Dell Support for assistance\n",
2922 bp->dev->name);
2923}
ab6ad5a4 2924
877e9aa4 2925static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2926{
34f80b04 2927 int port = BP_PORT(bp);
877e9aa4 2928 int reg_offset;
4d295db0 2929 u32 val, swap_val, swap_override;
877e9aa4 2930
34f80b04
EG
2931 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2932 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2933
34f80b04 2934 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2935
2936 val = REG_RD(bp, reg_offset);
2937 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2938 REG_WR(bp, reg_offset, val);
2939
2940 BNX2X_ERR("SPIO5 hw attention\n");
2941
fd4ef40d 2942 /* Fan failure attention */
35b19ba5
EG
2943 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2944 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2945 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2946 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2947 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2948 /* The PHY reset is controlled by GPIO 1 */
2949 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2950 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2951 break;
2952
4d295db0
EG
2953 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2954 /* The PHY reset is controlled by GPIO 1 */
2955 /* fake the port number to cancel the swap done in
2956 set_gpio() */
2957 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2958 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2959 port = (swap_val && swap_override) ^ 1;
2960 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2961 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2962 break;
2963
877e9aa4
ET
2964 default:
2965 break;
2966 }
fd4ef40d 2967 bnx2x_fan_failure(bp);
877e9aa4 2968 }
34f80b04 2969
589abe3a
EG
2970 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2971 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2972 bnx2x_acquire_phy_lock(bp);
2973 bnx2x_handle_module_detect_int(&bp->link_params);
2974 bnx2x_release_phy_lock(bp);
2975 }
2976
34f80b04
EG
2977 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2978
2979 val = REG_RD(bp, reg_offset);
2980 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2981 REG_WR(bp, reg_offset, val);
2982
2983 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2984 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2985 bnx2x_panic();
2986 }
877e9aa4
ET
2987}
2988
2989static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2990{
2991 u32 val;
2992
0626b899 2993 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2994
2995 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2996 BNX2X_ERR("DB hw attention 0x%x\n", val);
2997 /* DORQ discard attention */
2998 if (val & 0x2)
2999 BNX2X_ERR("FATAL error from DORQ\n");
3000 }
34f80b04
EG
3001
3002 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3003
3004 int port = BP_PORT(bp);
3005 int reg_offset;
3006
3007 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3008 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3009
3010 val = REG_RD(bp, reg_offset);
3011 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3012 REG_WR(bp, reg_offset, val);
3013
3014 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3015 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3016 bnx2x_panic();
3017 }
877e9aa4
ET
3018}
3019
3020static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3021{
3022 u32 val;
3023
3024 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3025
3026 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3027 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3028 /* CFC error attention */
3029 if (val & 0x2)
3030 BNX2X_ERR("FATAL error from CFC\n");
3031 }
3032
3033 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3034
3035 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3036 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3037 /* RQ_USDMDP_FIFO_OVERFLOW */
3038 if (val & 0x18000)
3039 BNX2X_ERR("FATAL error from PXP\n");
3040 }
34f80b04
EG
3041
3042 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3043
3044 int port = BP_PORT(bp);
3045 int reg_offset;
3046
3047 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3048 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3049
3050 val = REG_RD(bp, reg_offset);
3051 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3052 REG_WR(bp, reg_offset, val);
3053
3054 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3055 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3056 bnx2x_panic();
3057 }
877e9aa4
ET
3058}
3059
3060static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3061{
34f80b04
EG
3062 u32 val;
3063
877e9aa4
ET
3064 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3065
34f80b04
EG
3066 if (attn & BNX2X_PMF_LINK_ASSERT) {
3067 int func = BP_FUNC(bp);
3068
3069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3070 val = SHMEM_RD(bp, func_mb[func].drv_status);
3071 if (val & DRV_STATUS_DCC_EVENT_MASK)
3072 bnx2x_dcc_event(bp,
3073 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3074 bnx2x__link_status_update(bp);
2691d51d 3075 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3076 bnx2x_pmf_update(bp);
3077
3078 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3079
3080 BNX2X_ERR("MC assert!\n");
3081 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3083 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3085 bnx2x_panic();
3086
3087 } else if (attn & BNX2X_MCP_ASSERT) {
3088
3089 BNX2X_ERR("MCP assert!\n");
3090 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3091 bnx2x_fw_dump(bp);
877e9aa4
ET
3092
3093 } else
3094 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3095 }
3096
3097 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3098 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3099 if (attn & BNX2X_GRC_TIMEOUT) {
3100 val = CHIP_IS_E1H(bp) ?
3101 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3102 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3103 }
3104 if (attn & BNX2X_GRC_RSV) {
3105 val = CHIP_IS_E1H(bp) ?
3106 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3107 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3108 }
877e9aa4 3109 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3110 }
3111}
3112
3113static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3114{
a2fbb9ea
ET
3115 struct attn_route attn;
3116 struct attn_route group_mask;
34f80b04 3117 int port = BP_PORT(bp);
877e9aa4 3118 int index;
a2fbb9ea
ET
3119 u32 reg_addr;
3120 u32 val;
3fcaf2e5 3121 u32 aeu_mask;
a2fbb9ea
ET
3122
3123 /* need to take HW lock because MCP or other port might also
3124 try to handle this event */
4a37fb66 3125 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3126
3127 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3128 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3129 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3130 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3131 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3132 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3133
3134 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3135 if (deasserted & (1 << index)) {
3136 group_mask = bp->attn_group[index];
3137
34f80b04
EG
3138 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3139 index, group_mask.sig[0], group_mask.sig[1],
3140 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3141
877e9aa4
ET
3142 bnx2x_attn_int_deasserted3(bp,
3143 attn.sig[3] & group_mask.sig[3]);
3144 bnx2x_attn_int_deasserted1(bp,
3145 attn.sig[1] & group_mask.sig[1]);
3146 bnx2x_attn_int_deasserted2(bp,
3147 attn.sig[2] & group_mask.sig[2]);
3148 bnx2x_attn_int_deasserted0(bp,
3149 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3150
a2fbb9ea
ET
3151 if ((attn.sig[0] & group_mask.sig[0] &
3152 HW_PRTY_ASSERT_SET_0) ||
3153 (attn.sig[1] & group_mask.sig[1] &
3154 HW_PRTY_ASSERT_SET_1) ||
3155 (attn.sig[2] & group_mask.sig[2] &
3156 HW_PRTY_ASSERT_SET_2))
6378c025 3157 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3158 }
3159 }
3160
4a37fb66 3161 bnx2x_release_alr(bp);
a2fbb9ea 3162
5c862848 3163 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3164
3165 val = ~deasserted;
3fcaf2e5
EG
3166 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3167 val, reg_addr);
5c862848 3168 REG_WR(bp, reg_addr, val);
a2fbb9ea 3169
a2fbb9ea 3170 if (~bp->attn_state & deasserted)
3fcaf2e5 3171 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3172
3173 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3174 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3175
3fcaf2e5
EG
3176 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3177 aeu_mask = REG_RD(bp, reg_addr);
3178
3179 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3180 aeu_mask, deasserted);
3181 aeu_mask |= (deasserted & 0xff);
3182 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3183
3fcaf2e5
EG
3184 REG_WR(bp, reg_addr, aeu_mask);
3185 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3186
3187 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3188 bp->attn_state &= ~deasserted;
3189 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3190}
3191
3192static void bnx2x_attn_int(struct bnx2x *bp)
3193{
3194 /* read local copy of bits */
68d59484
EG
3195 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3196 attn_bits);
3197 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3198 attn_bits_ack);
a2fbb9ea
ET
3199 u32 attn_state = bp->attn_state;
3200
3201 /* look for changed bits */
3202 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3203 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3204
3205 DP(NETIF_MSG_HW,
3206 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3207 attn_bits, attn_ack, asserted, deasserted);
3208
3209 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3210 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3211
3212 /* handle bits that were raised */
3213 if (asserted)
3214 bnx2x_attn_int_asserted(bp, asserted);
3215
3216 if (deasserted)
3217 bnx2x_attn_int_deasserted(bp, deasserted);
3218}
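/*
 * Standalone worked example (assumed bit values) of the derivation in
 * bnx2x_attn_int() above: a bit is "asserted" when newly set in
 * attn_bits but not yet acked or tracked, and "deasserted" when it has
 * cleared while still acked and tracked.
 */
#include <stdio.h>

int main(void)
{
	unsigned int attn_bits = 0x5, attn_ack = 0x3, attn_state = 0x3;
	unsigned int asserted = attn_bits & ~attn_ack & ~attn_state;
	unsigned int deasserted = ~attn_bits & attn_ack & attn_state;

	printf("asserted=0x%x deasserted=0x%x\n", asserted, deasserted);
	return 0;	/* prints asserted=0x4 deasserted=0x2 */
}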
3219
3220static void bnx2x_sp_task(struct work_struct *work)
3221{
1cf167f2 3222 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3223 u16 status;
3224
34f80b04 3225
a2fbb9ea
ET
3226 /* Return here if interrupt is disabled */
3227 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3228 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3229 return;
3230 }
3231
3232 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3233/* if (status == 0) */
3234/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3235
3196a88a 3236 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3237
877e9aa4
ET
3238 /* HW attentions */
3239 if (status & 0x1)
a2fbb9ea 3240 bnx2x_attn_int(bp);
a2fbb9ea 3241
68d59484 3242 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3243 IGU_INT_NOP, 1);
3244 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3245 IGU_INT_NOP, 1);
3246 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3247 IGU_INT_NOP, 1);
3248 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3249 IGU_INT_NOP, 1);
3250 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3251 IGU_INT_ENABLE, 1);
877e9aa4 3252
a2fbb9ea
ET
3253}
3254
3255static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3256{
3257 struct net_device *dev = dev_instance;
3258 struct bnx2x *bp = netdev_priv(dev);
3259
3260 /* Return here if interrupt is disabled */
3261 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3262 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3263 return IRQ_HANDLED;
3264 }
3265
8d9c5f34 3266 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3267
3268#ifdef BNX2X_STOP_ON_ERROR
3269 if (unlikely(bp->panic))
3270 return IRQ_HANDLED;
3271#endif
3272
993ac7b5
MC
3273#ifdef BCM_CNIC
3274 {
3275 struct cnic_ops *c_ops;
3276
3277 rcu_read_lock();
3278 c_ops = rcu_dereference(bp->cnic_ops);
3279 if (c_ops)
3280 c_ops->cnic_handler(bp->cnic_data, NULL);
3281 rcu_read_unlock();
3282 }
3283#endif
1cf167f2 3284 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3285
3286 return IRQ_HANDLED;
3287}
3288
3289/* end of slow path */
3290
3291/* Statistics */
3292
3293/****************************************************************************
3294* Macros
3295****************************************************************************/
3296
a2fbb9ea
ET
3297/* sum[hi:lo] += add[hi:lo] */
3298#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3299 do { \
3300 s_lo += a_lo; \
f5ba6772 3301 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3302 } while (0)
3303
3304/* difference = minuend - subtrahend */
3305#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3306 do { \
bb2a0f7a
YG
3307 if (m_lo < s_lo) { \
3308 /* underflow */ \
a2fbb9ea 3309 d_hi = m_hi - s_hi; \
bb2a0f7a 3310 if (d_hi > 0) { \
6378c025 3311 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3312 d_hi--; \
3313 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3314 } else { \
6378c025 3315 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3316 d_hi = 0; \
3317 d_lo = 0; \
3318 } \
bb2a0f7a
YG
3319 } else { \
3320 /* m_lo >= s_lo */ \
a2fbb9ea 3321 if (m_hi < s_hi) { \
bb2a0f7a
YG
3322 d_hi = 0; \
3323 d_lo = 0; \
3324 } else { \
6378c025 3325 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3326 d_hi = m_hi - s_hi; \
3327 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3328 } \
3329 } \
3330 } while (0)
3331
bb2a0f7a 3332#define UPDATE_STAT64(s, t) \
a2fbb9ea 3333 do { \
bb2a0f7a
YG
3334 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3335 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3336 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3337 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3338 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3339 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3340 } while (0)
3341
bb2a0f7a 3342#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3343 do { \
bb2a0f7a
YG
3344 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3345 diff.lo, new->s##_lo, old->s##_lo); \
3346 ADD_64(estats->t##_hi, diff.hi, \
3347 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3348 } while (0)
3349
3350/* sum[hi:lo] += add */
3351#define ADD_EXTEND_64(s_hi, s_lo, a) \
3352 do { \
3353 s_lo += a; \
3354 s_hi += (s_lo < a) ? 1 : 0; \
3355 } while (0)
3356
bb2a0f7a 3357#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3358 do { \
bb2a0f7a
YG
3359 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3360 pstats->mac_stx[1].s##_lo, \
3361 new->s); \
a2fbb9ea
ET
3362 } while (0)
3363
bb2a0f7a 3364#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3365 do { \
4781bfad
EG
3366 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3367 old_tclient->s = tclient->s; \
de832a55
EG
3368 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3369 } while (0)
3370
3371#define UPDATE_EXTEND_USTAT(s, t) \
3372 do { \
3373 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3374 old_uclient->s = uclient->s; \
3375 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3376 } while (0)
3377
3378#define UPDATE_EXTEND_XSTAT(s, t) \
3379 do { \
4781bfad
EG
3380 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3381 old_xclient->s = xclient->s; \
de832a55
EG
3382 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3383 } while (0)
3384
3385/* minuend -= subtrahend */
3386#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3387 do { \
3388 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3389 } while (0)
3390
3391/* minuend[hi:lo] -= subtrahend */
3392#define SUB_EXTEND_64(m_hi, m_lo, s) \
3393 do { \
3394 SUB_64(m_hi, 0, m_lo, s); \
3395 } while (0)
3396
3397#define SUB_EXTEND_USTAT(s, t) \
3398 do { \
3399 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3400 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3401 } while (0)
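/*
 * Standalone worked example of the split-64 carry in ADD_64 above:
 * adding 1 to lo = 0xffffffff wraps to 0, and the (s_lo < a_lo) test
 * detects the wrap and carries into hi.
 */
#include <stdio.h>

int main(void)
{
	unsigned int s_hi = 0, s_lo = 0xffffffff;
	unsigned int a_hi = 0, a_lo = 1;

	s_lo += a_lo;
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
	printf("hi=0x%x lo=0x%x\n", s_hi, s_lo);	/* hi=0x1 lo=0x0 */
	return 0;
}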
3402
3403/*
3404 * General service functions
3405 */
3406
3407static inline long bnx2x_hilo(u32 *hiref)
3408{
3409 u32 lo = *(hiref + 1);
3410#if (BITS_PER_LONG == 64)
3411 u32 hi = *hiref;
3412
3413 return HILO_U64(hi, lo);
3414#else
3415 return lo;
3416#endif
3417}
3418
3419/*
3420 * Init service functions
3421 */
3422
bb2a0f7a
YG
3423static void bnx2x_storm_stats_post(struct bnx2x *bp)
3424{
3425 if (!bp->stats_pending) {
3426 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3427 int i, rc;
bb2a0f7a
YG
3428
3429 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3430 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3431 for_each_queue(bp, i)
3432 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3433
3434 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3435 ((u32 *)&ramrod_data)[1],
3436 ((u32 *)&ramrod_data)[0], 0);
3437 if (rc == 0) {
3438 /* stats ramrod has its own slot on the spq */
3439 bp->spq_left++;
3440 bp->stats_pending = 1;
3441 }
3442 }
3443}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
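
/*
 * Editor's note: when commands have been queued in the slowpath buffer
 * (bp->executer_idx != 0), the command built here acts as a "loader":
 * it DMAs a queued command image from host memory into DMAE command
 * memory (slot loader_idx + 1), and its GRC completion write to
 * dmae_reg_go_c[loader_idx + 1] triggers that slot.  The queued
 * commands in turn complete back to a "go" register (see the
 * comp_addr_lo = dmae_reg_go_c[...] assignments in the builders below),
 * so the chain advances with no host involvement; only the last command
 * completes to host memory with DMAE_COMP_VAL for bnx2x_stats_comp()
 * to poll.  The dmae->len-- on E1 looks like a chip-revision quirk
 * (one dword less of command image is loaded there).
 */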

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats to finish\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
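
/*
 * Editor's note: the wait above is bounded to roughly 10 ms (ten
 * msleep(1) iterations) and the function unconditionally returns 1;
 * callers treat DMAE completion as best-effort, and only the error
 * message distinguishes a timeout.
 */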

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
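
/*
 * Editor's note: a single DMAE read is capped at DMAE_LEN32_RD_MAX
 * dwords, so the port stats image is fetched in two chunks.  The first
 * chunk completes to a DMAE "go" register so the chain keeps moving;
 * the second completes to the stats_comp word in host memory, letting
 * the bnx2x_stats_comp() call above observe the end of the whole
 * transfer.  Note the offset arithmetic: the source advances by
 * DMAE_LEN32_RD_MAX dwords while the destination advances by
 * DMAE_LEN32_RD_MAX * 4 bytes.
 */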

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
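
/*
 * Editor's note: the chain assembled above does three jobs in one shot:
 * it publishes the host port/function stats to the MCP scratchpad
 * (PCI -> GRC), pulls the active MAC's hardware counters (BMAC or EMAC,
 * never both, hence the if/else on mac_type) into mac_stats, and pulls
 * the NIG block counters.  Every command but the last completes by
 * writing to a DMAE "go" register so the chain keeps moving; only the
 * final NIG read completes to host memory.  *stats_comp is armed here
 * but the chain is not started: bnx2x_stats_start() calls
 * bnx2x_hw_stats_post() afterwards.
 */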

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
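
/*
 * Editor's note: the distinct return codes above (-1/-2/-4 for the
 * x/t/u storm respectively) let a debugger tell which storm's counter
 * lagged; callers only test for non-zero.  Because bp->stats_pending is
 * cleared only on success, bnx2x_stats_update() below can count
 * consecutive failures and escalate to bnx2x_panic() when the firmware
 * stops answering.
 */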

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
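
/*
 * Editor's note: the 2x4 table above is the entire statistics state
 * machine: bnx2x_stats_stm[state][event] names the action to run and
 * the state to move to.  A sketch of a typical transition (the
 * STATS_EVENT_LINK_UP dispatch exists elsewhere in this file; shown
 * here only for illustration):
 */
#if 0 /* example, never compiled */
	/* state == STATS_STATE_DISABLED, link comes up: */
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	/* -> runs bnx2x_stats_start(), stats_state = STATS_STATE_ENABLED;
	 * every later timer tick dispatches STATS_EVENT_UPDATE, which now
	 * maps to bnx2x_stats_update() instead of bnx2x_stats_do_nothing().
	 */
#endif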

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
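
/*
 * Editor's note: a worked example of the pulse check above, assuming a
 * sequence mask that allows values 0..0x7fff.  If the driver has just
 * written drv_pulse = 0x1001, then until the MCP answers the last
 * mcp_pulse it echoed is 0x1000, so drv_pulse == ((mcp_pulse + 1) &
 * mask) and nothing is logged; once the MCP catches up, drv_pulse ==
 * mcp_pulse.  Any other delta means driver and firmware heartbeats have
 * diverged, and the error above fires.
 */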

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
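
/*
 * Editor's note: for each fastpath status block the routine above (a)
 * tells the chip where the USTORM and CSTORM sections live in host
 * memory (the two 32-bit halves of 'section' written into CSTORM
 * internal memory), (b) records which PCI function owns the block, and
 * (c) writes 1 to every HC_DISABLE index so no coalescing timer runs
 * until bnx2x_update_coalesce() programs real values.  The final
 * bnx2x_ack_sb() enables interrupt generation for the block.
 */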

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
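
/*
 * Editor's note: rx_ticks/tx_ticks are kept in microseconds while the
 * host-coalescing timeout register apparently counts in 12 us units,
 * hence the /12.  A tick value below 12 us yields a timeout of 0, and
 * the paired HC_DISABLE write then stores 1, i.e. interrupt coalescing
 * is switched off entirely for that index.
 */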

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set(&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}
4995
555f6c78 4996 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4997 struct bnx2x_fastpath *fp = &bp->fp[j];
4998
4999 fp->rx_bd_cons = 0;
5000 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
5001 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5002
ca00392c
EG
5003 /* Mark queue as Rx */
5004 fp->is_rx_queue = 1;
5005
7a9b2557
VZ
5006 /* "next page" elements initialization */
5007 /* SGE ring */
5008 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5009 struct eth_rx_sge *sge;
5010
5011 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5012 sge->addr_hi =
5013 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5014 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5015 sge->addr_lo =
5016 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5017 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5018 }
5019
5020 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5021
7a9b2557 5022 /* RX BD ring */
a2fbb9ea
ET
5023 for (i = 1; i <= NUM_RX_RINGS; i++) {
5024 struct eth_rx_bd *rx_bd;
5025
5026 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5027 rx_bd->addr_hi =
5028 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5029 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5030 rx_bd->addr_lo =
5031 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5032 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5033 }
5034
34f80b04 5035 /* CQ ring */
a2fbb9ea
ET
5036 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5037 struct eth_rx_cqe_next_page *nextpg;
5038
5039 nextpg = (struct eth_rx_cqe_next_page *)
5040 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5041 nextpg->addr_hi =
5042 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5043 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5044 nextpg->addr_lo =
5045 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5046 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5047 }
5048
7a9b2557
VZ
5049 /* Allocate SGEs and initialize the ring elements */
5050 for (i = 0, ring_prod = 0;
5051 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5052
7a9b2557
VZ
5053 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5054 BNX2X_ERR("was only able to allocate "
5055 "%d rx sges\n", i);
5056 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5057 /* Cleanup already allocated elements */
5058 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5059 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5060 fp->disable_tpa = 1;
5061 ring_prod = 0;
5062 break;
5063 }
5064 ring_prod = NEXT_SGE_IDX(ring_prod);
5065 }
5066 fp->rx_sge_prod = ring_prod;
5067
5068 /* Allocate BDs and initialize BD ring */
66e855f3 5069 fp->rx_comp_cons = 0;
7a9b2557 5070 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5071 for (i = 0; i < bp->rx_ring_size; i++) {
5072 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5073 BNX2X_ERR("was only able to allocate "
de832a55
EG
5074 "%d rx skbs on queue[%d]\n", i, j);
5075 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5076 break;
5077 }
5078 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5079 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5080 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5081 }
5082
7a9b2557
VZ
5083 fp->rx_bd_prod = ring_prod;
5084 /* must not have more available CQEs than BDs */
5085 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5086 cqe_ring_prod);
a2fbb9ea
ET
5087 fp->rx_pkt = fp->rx_calls = 0;
5088
7a9b2557
VZ
5089 /* Warning!
5090 * this will generate an interrupt (to the TSTORM)
5091 * must only be done after chip is initialized
5092 */
5093 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5094 fp->rx_sge_prod);
a2fbb9ea
ET
5095 if (j != 0)
5096 continue;
5097
5098 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5099 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5100 U64_LO(fp->rx_comp_mapping));
5101 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5102 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5103 U64_HI(fp->rx_comp_mapping));
5104 }
5105}
5106
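/* Chain the tx BD pages the same way (the last element of every page is
 * a next-bd pointer back into the ring) and reset the doorbell record
 * and all tx producer/consumer indices to zero.
 */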
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

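/* Fill the per-connection eth_context in the slowpath buffer: the USTORM
 * part describes the rx BD/SGE pages and TPA limits, the XSTORM part the
 * tx BD pages, and the CDU words are validation values derived from the
 * HW CID - presumably consumed by the firmware when the client is
 * brought up.
 */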
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

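/* RSS indirection: each of the TSTORM_INDIRECTION_TABLE_SIZE one-byte
 * entries maps an rx-hash bucket to a client ID, spreading the buckets
 * round-robin over the rx queues (leading cl_id + i % num_rx_queues).
 */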
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

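/* Per-function internal-memory setup: RSS/TPA configuration, initial rx
 * mode, per-client statistics reset, statistics collection addresses,
 * CQE page addresses and TPA aggregation limits, dropless-FC thresholds
 * (E1H only) and the rate-shaping/fairness (cmng) context.
 */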
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

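/* The cases below intentionally fall through: a COMMON load also runs
 * the PORT and FUNCTION stages, and a PORT load also runs FUNCTION, so
 * each driver instance initializes exactly the scope the MCP assigned
 * to it via load_code.
 */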
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

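/* The firmware blobs loaded via request_firmware() contain gzip-wrapped
 * sections; bnx2x_gunzip_init() sets up a FW_BUF_SIZE staging buffer
 * plus a zlib stream and workspace for bnx2x_gunzip() to inflate into.
 * The buffer is allocated DMA-coherent, presumably so the uncompressed
 * image can be DMAed to the chip directly.
 */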
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

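/* Inflate a gzip-wrapped blob into bp->gunzip_buf.  The 10-byte gzip
 * header is checked (magic 0x1f 0x8b, method Z_DEFLATED) and skipped;
 * if the FNAME flag (bit 3 of the flags byte) is set, the NUL-terminated
 * original-file-name field is skipped as well.  The negative window-bits
 * value (-MAX_WBITS) tells zlib the remaining stream is raw deflate with
 * no zlib header.  The output length is returned in 32-bit words
 * (gunzip_outlen >>= 2).
 */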
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver;
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

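/* Derive the PXP arbiter read/write orders from the PCIe Device Control
 * register: Max_Payload_Size lives in bits 7:5 (hence the >> 5) and
 * Max_Read_Request_Size in bits 14:12 (hence the >> 12), both encoded as
 * a power of two times 128 bytes.  The mrrs module parameter, when not
 * -1, overrides the read order.
 */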
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

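/* First stage of the staged HW init: everything here is chip-global and
 * is executed only by the driver instance that received
 * FW_MSG_CODE_DRV_LOAD_COMMON from the MCP (see bnx2x_init_hw() below);
 * per-port and per-function setup follow in bnx2x_init_port() and
 * bnx2x_init_func().
 */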
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1=valid bit is
   added at the 53rd bit;
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif
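
/* Write one ILT (internal lookup table) entry, translating an on-chip
 * line to a host physical page: the value encoded by ONCHIP_ADDR1/2
 * above is the page-aligned address plus a valid bit, split across the
 * two 32-bit halves of a wide register and written together via
 * bnx2x_wb_wr().
 */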
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}
6548
6549static int bnx2x_init_func(struct bnx2x *bp)
6550{
6551 int port = BP_PORT(bp);
6552 int func = BP_FUNC(bp);
8badd27a 6553 u32 addr, val;
34f80b04
EG
6554 int i;
6555
6556 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6557
8badd27a
EG
6558 /* set MSI reconfigure capability */
6559 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6560 val = REG_RD(bp, addr);
6561 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6562 REG_WR(bp, addr, val);
6563
34f80b04
EG
6564 i = FUNC_ILT_BASE(func);
6565
6566 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6567 if (CHIP_IS_E1H(bp)) {
6568 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6569 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6570 } else /* E1 */
6571 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6572 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6573
37b091ba
MC
6574#ifdef BCM_CNIC
6575 i += 1 + CNIC_ILT_LINES;
6576 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6577 if (CHIP_IS_E1(bp))
6578 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6579 else {
6580 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6581 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6582 }
6583
6584 i++;
6585 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6586 if (CHIP_IS_E1(bp))
6587 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6588 else {
6589 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6590 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6591 }
6592
6593 i++;
6594 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6595 if (CHIP_IS_E1(bp))
6596 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6597 else {
6598 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6599 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6600 }
6601
6602 /* tell the searcher where the T2 table is */
6603 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6604
6605 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6606 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6607
6608 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6609 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6610 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6611
6612 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6613#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
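	/* Note (added for clarity): the cases above intentionally fall
	 * through, so LOAD_COMMON runs common, port and function init,
	 * LOAD_PORT runs port and function init, and LOAD_FUNCTION runs
	 * function init only -- matching how much of the chip the MCP
	 * says this driver instance owns.
	 */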

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate the searcher T2 table: 1/4 of the T1 allocation
	 * (T2 is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

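	/* Reading aid (not in the original source): the loop above links
	 * the 64-byte T2 entries into a free list -- the last 8 bytes of
	 * each entry hold the physical address of the next one -- which
	 * is the layout SRC_REG_FIRSTFREE0/SRC_REG_LASTFREE0 in
	 * bnx2x_init_func() expect.
	 */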
	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
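/* Resulting vector layout (summary added for clarity): msix_table[0] is
 * the slowpath vector, entry 1 is the CNIC vector when BCM_CNIC is set,
 * and the remaining BNX2X_NUM_QUEUES(bp) entries carry one vector per
 * fastpath queue -- which is why bnx2x_req_msix_irqs() and
 * bnx2x_free_msix_irqs() index the table at [i + offset].
 */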

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
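	/* Example of how this window is used elsewhere in this file: the E1
	 * primary Ethernet MAC goes to cam_offset 0 (port 0) or 32 (port 1)
	 * with its broadcast entry in the slot above it, and the iSCSI MAC
	 * two slots above that -- see bnx2x_set_eth_mac_addr_e1() and
	 * bnx2x_set_iscsi_eth_mac_addr().
	 */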
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
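/* Canonical caller pattern (summarized here for clarity, as used by the
 * helpers below): bump the pending counter, smp_wmb(), post the ramrod
 * with bnx2x_sp_post(), then call bnx2x_wait_ramrod() on the counter or
 * on the fastpath state word -- polling only when completions cannot be
 * delivered by interrupt (e.g. while clearing a MAC during unload).
 */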

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
				  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  "  defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}
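/* Worked example (illustrative, not from the original source): with
 * multi_mode=1 (ETH_RSS_MODE_REGULAR), neither num_rx_queues nor
 * num_tx_queues set as module parameters and 8 online CPUs, both counts
 * come out as min(8, BNX2X_MAX_QUEUES(bp)); passing num_tx_queues=16 on
 * the same system would be clamped back down to the Rx queue count.
 */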

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
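	/* Background note (editorial, not from the original source): the
	 * PMF (port management function) is the one driver instance per
	 * port that owns shared duties such as link handling and port
	 * statistics; the first function loaded on a port (LOAD_COMMON or
	 * LOAD_PORT) takes the role, which is why bnx2x_initial_phy_init()
	 * below runs only when bp->port.pmf is set.
	 */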

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
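	/* Byte packing example (illustrative, not from the original
	 * source): for MAC 00:11:22:33:44:55 the two EMAC_WR() calls in
	 * the WoL branch above write 0x00000011 and 0x22334455, i.e. the
	 * six MAC bytes split 2/4 across consecutive MAC_MATCH registers.
	 */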

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
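	/* Decoding example (illustrative, not from the original source):
	 * a chip_id of 0x164e1014 would mean chip number 0x164e,
	 * revision 1, metal 0x01 and bond_id 4, per the num:16-31 /
	 * rev:12-15 / metal:4-11 / bond_id:0-3 layout noted above.
	 */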

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
8366
8367static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8368 u32 switch_cfg)
a2fbb9ea 8369{
34f80b04 8370 int port = BP_PORT(bp);
a2fbb9ea
ET
8371 u32 ext_phy_type;
8372
a2fbb9ea
ET
8373 switch (switch_cfg) {
8374 case SWITCH_CFG_1G:
8375 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8376
c18487ee
YR
8377 ext_phy_type =
8378 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8379 switch (ext_phy_type) {
8380 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8381 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8382 ext_phy_type);
8383
34f80b04
EG
8384 bp->port.supported |= (SUPPORTED_10baseT_Half |
8385 SUPPORTED_10baseT_Full |
8386 SUPPORTED_100baseT_Half |
8387 SUPPORTED_100baseT_Full |
8388 SUPPORTED_1000baseT_Full |
8389 SUPPORTED_2500baseX_Full |
8390 SUPPORTED_TP |
8391 SUPPORTED_FIBRE |
8392 SUPPORTED_Autoneg |
8393 SUPPORTED_Pause |
8394 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8395 break;
8396
8397 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8398 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8399 ext_phy_type);
8400
34f80b04
EG
8401 bp->port.supported |= (SUPPORTED_10baseT_Half |
8402 SUPPORTED_10baseT_Full |
8403 SUPPORTED_100baseT_Half |
8404 SUPPORTED_100baseT_Full |
8405 SUPPORTED_1000baseT_Full |
8406 SUPPORTED_TP |
8407 SUPPORTED_FIBRE |
8408 SUPPORTED_Autoneg |
8409 SUPPORTED_Pause |
8410 SUPPORTED_Asym_Pause);
8411 break;
8412
8413 default:
8414 BNX2X_ERR("NVRAM config error. "
8415 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8416 bp->link_params.ext_phy_config);
8417 return;
8418 }
8419
8420 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8421 port*0x10);
8422 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8423 break;
8424
8425 case SWITCH_CFG_10G:
8426 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8427
8428 ext_phy_type =
8429 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8430 switch (ext_phy_type) {
8431 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8432 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8433 ext_phy_type);
8434
8435 bp->port.supported |= (SUPPORTED_10baseT_Half |
8436 SUPPORTED_10baseT_Full |
8437 SUPPORTED_100baseT_Half |
8438 SUPPORTED_100baseT_Full |
8439 SUPPORTED_1000baseT_Full |
8440 SUPPORTED_2500baseX_Full |
8441 SUPPORTED_10000baseT_Full |
8442 SUPPORTED_TP |
8443 SUPPORTED_FIBRE |
8444 SUPPORTED_Autoneg |
8445 SUPPORTED_Pause |
8446 SUPPORTED_Asym_Pause);
8447 break;
8448
8449 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8450 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8451 ext_phy_type);
f1410647 8452
34f80b04 8453 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8454 SUPPORTED_1000baseT_Full |
34f80b04 8455 SUPPORTED_FIBRE |
589abe3a 8456 SUPPORTED_Autoneg |
8457 SUPPORTED_Pause |
8458 SUPPORTED_Asym_Pause);
8459 break;
8460
8461 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8462 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8463 ext_phy_type);
8464
34f80b04 8465 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8466 SUPPORTED_2500baseX_Full |
34f80b04 8467 SUPPORTED_1000baseT_Full |
8468 SUPPORTED_FIBRE |
8469 SUPPORTED_Autoneg |
8470 SUPPORTED_Pause |
8471 SUPPORTED_Asym_Pause);
8472 break;
8473
8474 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8475 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8476 ext_phy_type);
8477
8478 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8479 SUPPORTED_FIBRE |
8480 SUPPORTED_Pause |
8481 SUPPORTED_Asym_Pause);
8482 break;
8483
8484 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8485 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8486 ext_phy_type);
8487
8488 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8489 SUPPORTED_1000baseT_Full |
8490 SUPPORTED_FIBRE |
8491 SUPPORTED_Pause |
8492 SUPPORTED_Asym_Pause);
8493 break;
8494
8495 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8496 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8497 ext_phy_type);
8498
34f80b04 8499 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8500 SUPPORTED_1000baseT_Full |
34f80b04 8501 SUPPORTED_Autoneg |
589abe3a 8502 SUPPORTED_FIBRE |
8503 SUPPORTED_Pause |
8504 SUPPORTED_Asym_Pause);
8505 break;
8506
8507 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8508 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8509 ext_phy_type);
8510
8511 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8512 SUPPORTED_1000baseT_Full |
8513 SUPPORTED_Autoneg |
8514 SUPPORTED_FIBRE |
8515 SUPPORTED_Pause |
8516 SUPPORTED_Asym_Pause);
8517 break;
8518
8519 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8520 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8521 ext_phy_type);
8522
8523 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8524 SUPPORTED_TP |
8525 SUPPORTED_Autoneg |
8526 SUPPORTED_Pause |
8527 SUPPORTED_Asym_Pause);
8528 break;
8529
8530 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8531 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8532 ext_phy_type);
8533
8534 bp->port.supported |= (SUPPORTED_10baseT_Half |
8535 SUPPORTED_10baseT_Full |
8536 SUPPORTED_100baseT_Half |
8537 SUPPORTED_100baseT_Full |
8538 SUPPORTED_1000baseT_Full |
8539 SUPPORTED_10000baseT_Full |
8540 SUPPORTED_TP |
8541 SUPPORTED_Autoneg |
8542 SUPPORTED_Pause |
8543 SUPPORTED_Asym_Pause);
8544 break;
8545
8546 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8547 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8548 bp->link_params.ext_phy_config);
8549 break;
8550
8551 default:
8552 BNX2X_ERR("NVRAM config error. "
8553 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8554 bp->link_params.ext_phy_config);
8555 return;
8556 }
8557
8558 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8559 port*0x18);
8560 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8561
8562 break;
8563
8564 default:
8565 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8566 bp->port.link_config);
8567 return;
8568 }
34f80b04 8569 bp->link_params.phy_addr = bp->port.phy_addr;
8570
8571 /* mask what we support according to speed_cap_mask */
8572 if (!(bp->link_params.speed_cap_mask &
8573 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8574 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8575
8576 if (!(bp->link_params.speed_cap_mask &
8577 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8578 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8579
8580 if (!(bp->link_params.speed_cap_mask &
8581 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8582 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8583
8584 if (!(bp->link_params.speed_cap_mask &
8585 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8586 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8587
8588 if (!(bp->link_params.speed_cap_mask &
8589 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8590 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8591 SUPPORTED_1000baseT_Full);
a2fbb9ea 8592
8593 if (!(bp->link_params.speed_cap_mask &
8594 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8595 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8596
8597 if (!(bp->link_params.speed_cap_mask &
8598 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8599 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8600
34f80b04 8601 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8602}
8603
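/* Translate the NVRAM link_config word into the requested line speed,
 * duplex and advertising mask, rejecting (with an NVRAM config error)
 * any speed that bp->port.supported does not include.
 */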
34f80b04 8604static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8605{
c18487ee 8606 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8607
34f80b04 8608 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8609 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8610 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8611 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8612 bp->port.advertising = bp->port.supported;
a2fbb9ea 8613 } else {
8614 u32 ext_phy_type =
8615 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8616
8617 if ((ext_phy_type ==
8618 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8619 (ext_phy_type ==
8620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8621 /* force 10G, no AN */
c18487ee 8622 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8623 bp->port.advertising =
8624 (ADVERTISED_10000baseT_Full |
8625 ADVERTISED_FIBRE);
8626 break;
8627 }
8628 BNX2X_ERR("NVRAM config error. "
8629 "Invalid link_config 0x%x"
8630 " Autoneg not supported\n",
34f80b04 8631 bp->port.link_config);
8632 return;
8633 }
8634 break;
8635
8636 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8637 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8638 bp->link_params.req_line_speed = SPEED_10;
8639 bp->port.advertising = (ADVERTISED_10baseT_Full |
8640 ADVERTISED_TP);
8641 } else {
8642 BNX2X_ERR("NVRAM config error. "
8643 "Invalid link_config 0x%x"
8644 " speed_cap_mask 0x%x\n",
34f80b04 8645 bp->port.link_config,
c18487ee 8646 bp->link_params.speed_cap_mask);
8647 return;
8648 }
8649 break;
8650
8651 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8652 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8653 bp->link_params.req_line_speed = SPEED_10;
8654 bp->link_params.req_duplex = DUPLEX_HALF;
8655 bp->port.advertising = (ADVERTISED_10baseT_Half |
8656 ADVERTISED_TP);
8657 } else {
8658 BNX2X_ERR("NVRAM config error. "
8659 "Invalid link_config 0x%x"
8660 " speed_cap_mask 0x%x\n",
34f80b04 8661 bp->port.link_config,
c18487ee 8662 bp->link_params.speed_cap_mask);
8663 return;
8664 }
8665 break;
8666
8667 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8668 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8669 bp->link_params.req_line_speed = SPEED_100;
8670 bp->port.advertising = (ADVERTISED_100baseT_Full |
8671 ADVERTISED_TP);
8672 } else {
8673 BNX2X_ERR("NVRAM config error. "
8674 "Invalid link_config 0x%x"
8675 " speed_cap_mask 0x%x\n",
34f80b04 8676 bp->port.link_config,
c18487ee 8677 bp->link_params.speed_cap_mask);
8678 return;
8679 }
8680 break;
8681
8682 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8683 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8684 bp->link_params.req_line_speed = SPEED_100;
8685 bp->link_params.req_duplex = DUPLEX_HALF;
8686 bp->port.advertising = (ADVERTISED_100baseT_Half |
8687 ADVERTISED_TP);
8688 } else {
8689 BNX2X_ERR("NVRAM config error. "
8690 "Invalid link_config 0x%x"
8691 " speed_cap_mask 0x%x\n",
34f80b04 8692 bp->port.link_config,
c18487ee 8693 bp->link_params.speed_cap_mask);
8694 return;
8695 }
8696 break;
8697
8698 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8699 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8700 bp->link_params.req_line_speed = SPEED_1000;
8701 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8702 ADVERTISED_TP);
8703 } else {
8704 BNX2X_ERR("NVRAM config error. "
8705 "Invalid link_config 0x%x"
8706 " speed_cap_mask 0x%x\n",
34f80b04 8707 bp->port.link_config,
c18487ee 8708 bp->link_params.speed_cap_mask);
8709 return;
8710 }
8711 break;
8712
8713 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8714 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8715 bp->link_params.req_line_speed = SPEED_2500;
8716 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8717 ADVERTISED_TP);
8718 } else {
8719 BNX2X_ERR("NVRAM config error. "
8720 "Invalid link_config 0x%x"
8721 " speed_cap_mask 0x%x\n",
34f80b04 8722 bp->port.link_config,
c18487ee 8723 bp->link_params.speed_cap_mask);
8724 return;
8725 }
8726 break;
8727
8728 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8729 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8730 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8731 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8732 bp->link_params.req_line_speed = SPEED_10000;
8733 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8734 ADVERTISED_FIBRE);
8735 } else {
8736 BNX2X_ERR("NVRAM config error. "
8737 "Invalid link_config 0x%x"
8738 " speed_cap_mask 0x%x\n",
34f80b04 8739 bp->port.link_config,
c18487ee 8740 bp->link_params.speed_cap_mask);
8741 return;
8742 }
8743 break;
8744
8745 default:
8746 BNX2X_ERR("NVRAM config error. "
8747 "BAD link speed link_config 0x%x\n",
34f80b04 8748 bp->port.link_config);
c18487ee 8749 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8750 bp->port.advertising = bp->port.supported;
8751 break;
8752 }
a2fbb9ea 8753
8754 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8755 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8756 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8757 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8758 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8759
c18487ee 8760 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8761 " advertising 0x%x\n",
8762 bp->link_params.req_line_speed,
8763 bp->link_params.req_duplex,
34f80b04 8764 bp->link_params.req_flow_ctrl, bp->port.advertising);
8765}
8766
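/* The MAC address is kept in shmem as two words (upper 16 and lower 32
 * bits); storing both big-endian leaves mac_buf[] in network byte order.
 */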
8767static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8768{
8769 mac_hi = cpu_to_be16(mac_hi);
8770 mac_lo = cpu_to_be32(mac_lo);
8771 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8772 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8773}
8774
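/* Read the per-port link setup (lane config, external PHY type, speed
 * capability mask, XGXS Rx/Tx equalization and the MAC address) from the
 * shmem region shared with the bootcode.
 */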
34f80b04 8775static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8776{
8777 int port = BP_PORT(bp);
8778 u32 val, val2;
589abe3a 8779 u32 config;
c2c8b03e 8780 u16 i;
01cd4528 8781 u32 ext_phy_type;
a2fbb9ea 8782
c18487ee 8783 bp->link_params.bp = bp;
34f80b04 8784 bp->link_params.port = port;
c18487ee 8785
c18487ee 8786 bp->link_params.lane_config =
a2fbb9ea 8787 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8788 bp->link_params.ext_phy_config =
8789 SHMEM_RD(bp,
8790 dev_info.port_hw_config[port].external_phy_config);
8791 /* BCM8727_NOC => BCM8727, no over-current */
8792 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8793 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8794 bp->link_params.ext_phy_config &=
8795 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8796 bp->link_params.ext_phy_config |=
8797 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8798 bp->link_params.feature_config_flags |=
8799 FEATURE_CONFIG_BCM8727_NOC;
8800 }
8801
c18487ee 8802 bp->link_params.speed_cap_mask =
8803 SHMEM_RD(bp,
8804 dev_info.port_hw_config[port].speed_capability_mask);
8805
34f80b04 8806 bp->port.link_config =
8807 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8808
8809 /* Get the XGXS Rx and Tx config for all 4 lanes */
8810 for (i = 0; i < 2; i++) {
8811 val = SHMEM_RD(bp,
8812 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8813 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8814 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8815
8816 val = SHMEM_RD(bp,
8817 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8818 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8819 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8820 }
8821
8822 /* If the device is capable of WoL, set the default state according
8823 * to the HW
8824 */
4d295db0 8825 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8826 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8827 (config & PORT_FEATURE_WOL_ENABLED));
8828
8829 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8830 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8831 bp->link_params.lane_config,
8832 bp->link_params.ext_phy_config,
34f80b04 8833 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8834
8835 bp->link_params.switch_cfg |= (bp->port.link_config &
8836 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8837 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8838
8839 bnx2x_link_settings_requested(bp);
8840
8841 /*
8842 * If connected directly, work with the internal PHY; otherwise, work
8843 * with the external PHY
8844 */
8845 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8846 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8847 bp->mdio.prtad = bp->link_params.phy_addr;
8848
8849 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8850 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8851 bp->mdio.prtad =
659bc5c4 8852 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8853
8854 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8855 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8856 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8857 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8858 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8859
8860#ifdef BCM_CNIC
8861 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8862 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8863 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8864#endif
8865}
8866
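/* On E1H, a valid outer-VLAN (E1HOV) tag in the function's mf_cfg entry
 * indicates multi-function mode; in that mode each function also gets its
 * own MAC address from mf_cfg instead of the port's one.
 */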
8867static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8868{
8869 int func = BP_FUNC(bp);
8870 u32 val, val2;
8871 int rc = 0;
a2fbb9ea 8872
34f80b04 8873 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8874
8875 bp->e1hov = 0;
8876 bp->e1hmf = 0;
8877 if (CHIP_IS_E1H(bp)) {
8878 bp->mf_config =
8879 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8880
2691d51d 8881 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8882 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8883 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8884 bp->e1hmf = 1;
8885 BNX2X_DEV_INFO("%s function mode\n",
8886 IS_E1HMF(bp) ? "multi" : "single");
8887
8888 if (IS_E1HMF(bp)) {
8889 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8890 e1hov_tag) &
8891 FUNC_MF_CFG_E1HOV_TAG_MASK);
8892 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8893 bp->e1hov = val;
8894 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8895 "(0x%04x)\n",
8896 func, bp->e1hov, bp->e1hov);
8897 } else {
8898 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8899 " aborting\n", func);
8900 rc = -EPERM;
8901 }
8902 } else {
8903 if (BP_E1HVN(bp)) {
8904 BNX2X_ERR("!!! VN %d in single function mode,"
8905 " aborting\n", BP_E1HVN(bp));
8906 rc = -EPERM;
8907 }
8908 }
8909 }
a2fbb9ea 8910
8911 if (!BP_NOMCP(bp)) {
8912 bnx2x_get_port_hwinfo(bp);
8913
8914 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8915 DRV_MSG_SEQ_NUMBER_MASK);
8916 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8917 }
8918
8919 if (IS_E1HMF(bp)) {
8920 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8921 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8922 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8923 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8924 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8925 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8926 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8927 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8928 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8929 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8930 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8931 ETH_ALEN);
8932 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8933 ETH_ALEN);
a2fbb9ea 8934 }
8935
8936 return rc;
8937 }
8938
8939 if (BP_NOMCP(bp)) {
8940 /* only supposed to happen on emulation/FPGA */
33471629 8941 BNX2X_ERR("warning: random MAC workaround active\n");
8942 random_ether_addr(bp->dev->dev_addr);
8943 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8944 }
a2fbb9ea 8945
8946 return rc;
8947}
8948
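/* One-time per-device init: fetch hw info, undo anything a previous UNDI
 * driver left behind, and set the default ring sizes, TPA mode,
 * coalescing parameters and periodic timer.
 */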
8949static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8950{
8951 int func = BP_FUNC(bp);
87942b46 8952 int timer_interval;
8953 int rc;
8954
8955 /* Disable interrupt handling until HW is initialized */
8956 atomic_set(&bp->intr_sem, 1);
e1510706 8957 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8958
34f80b04 8959 mutex_init(&bp->port.phy_mutex);
8960#ifdef BCM_CNIC
8961 mutex_init(&bp->cnic_mutex);
8962#endif
a2fbb9ea 8963
1cf167f2 8964 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8965 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8966
8967 rc = bnx2x_get_hwinfo(bp);
8968
8969 /* need to reset chip if undi was active */
8970 if (!BP_NOMCP(bp))
8971 bnx2x_undi_unload(bp);
8972
8973 if (CHIP_REV_IS_FPGA(bp))
8974 printk(KERN_ERR PFX "FPGA detected\n");
8975
8976 if (BP_NOMCP(bp) && (func == 0))
8977 printk(KERN_ERR PFX
8978 "MCP disabled, must load devices in order!\n");
8979
555f6c78 8980 /* Set multi queue mode */
8981 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8982 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8983 printk(KERN_ERR PFX
8badd27a 8984 "Multi disabled since int_mode requested is not MSI-X\n");
8985 multi_mode = ETH_RSS_MODE_DISABLED;
8986 }
8987 bp->multi_mode = multi_mode;
8988
8989
8990 /* Set TPA flags */
8991 if (disable_tpa) {
8992 bp->flags &= ~TPA_ENABLE_FLAG;
8993 bp->dev->features &= ~NETIF_F_LRO;
8994 } else {
8995 bp->flags |= TPA_ENABLE_FLAG;
8996 bp->dev->features |= NETIF_F_LRO;
8997 }
8998
8999 if (CHIP_IS_E1(bp))
9000 bp->dropless_fc = 0;
9001 else
9002 bp->dropless_fc = dropless_fc;
9003
8d5726c4 9004 bp->mrrs = mrrs;
7a9b2557 9005
9006 bp->tx_ring_size = MAX_TX_AVAIL;
9007 bp->rx_ring_size = MAX_RX_AVAIL;
9008
9009 bp->rx_csum = 1;
9010
9011 bp->tx_ticks = 50;
9012 bp->rx_ticks = 25;
9013
9014 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9015 bp->current_interval = (poll ? poll : timer_interval);
9016
9017 init_timer(&bp->timer);
9018 bp->timer.expires = jiffies + bp->current_interval;
9019 bp->timer.data = (unsigned long) bp;
9020 bp->timer.function = bnx2x_timer;
9021
9022 return rc;
9023}
9024
9025/*
9026 * ethtool service functions
9027 */
9028
9029/* All ethtool functions called with rtnl_lock */
9030
9031static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9032{
9033 struct bnx2x *bp = netdev_priv(dev);
9034
9035 cmd->supported = bp->port.supported;
9036 cmd->advertising = bp->port.advertising;
9037
9038 if (netif_carrier_ok(dev)) {
9039 cmd->speed = bp->link_vars.line_speed;
9040 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 9041 } else {
9042 cmd->speed = bp->link_params.req_line_speed;
9043 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 9044 }
9045 if (IS_E1HMF(bp)) {
9046 u16 vn_max_rate;
9047
9048 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9049 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9050 if (vn_max_rate < cmd->speed)
9051 cmd->speed = vn_max_rate;
9052 }
a2fbb9ea 9053
9054 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9055 u32 ext_phy_type =
9056 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9057
9058 switch (ext_phy_type) {
9059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9060 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9062 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9063 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9064 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9066 cmd->port = PORT_FIBRE;
9067 break;
9068
9069 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9071 cmd->port = PORT_TP;
9072 break;
9073
9074 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9075 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9076 bp->link_params.ext_phy_config);
9077 break;
9078
9079 default:
9080 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9081 bp->link_params.ext_phy_config);
9082 break;
9083 }
9084 } else
a2fbb9ea 9085 cmd->port = PORT_TP;
a2fbb9ea 9086
01cd4528 9087 cmd->phy_address = bp->mdio.prtad;
9088 cmd->transceiver = XCVR_INTERNAL;
9089
c18487ee 9090 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9091 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9092 else
a2fbb9ea 9093 cmd->autoneg = AUTONEG_DISABLE;
9094
9095 cmd->maxtxpkt = 0;
9096 cmd->maxrxpkt = 0;
9097
9098 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9099 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9100 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9101 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9102 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9103 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9104 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9105
9106 return 0;
9107}
9108
9109static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9110{
9111 struct bnx2x *bp = netdev_priv(dev);
9112 u32 advertising;
9113
9114 if (IS_E1HMF(bp))
9115 return 0;
9116
9117 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9118 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9119 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9120 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9121 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9122 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9123 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9124
a2fbb9ea 9125 if (cmd->autoneg == AUTONEG_ENABLE) {
9126 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9127 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9128 return -EINVAL;
f1410647 9129 }
9130
9131 /* advertise the requested speed and duplex if supported */
34f80b04 9132 cmd->advertising &= bp->port.supported;
a2fbb9ea 9133
9134 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9135 bp->link_params.req_duplex = DUPLEX_FULL;
9136 bp->port.advertising |= (ADVERTISED_Autoneg |
9137 cmd->advertising);
9138
9139 } else { /* forced speed */
9140 /* advertise the requested speed and duplex if supported */
9141 switch (cmd->speed) {
9142 case SPEED_10:
9143 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9144 if (!(bp->port.supported &
9145 SUPPORTED_10baseT_Full)) {
9146 DP(NETIF_MSG_LINK,
9147 "10M full not supported\n");
a2fbb9ea 9148 return -EINVAL;
f1410647 9149 }
9150
9151 advertising = (ADVERTISED_10baseT_Full |
9152 ADVERTISED_TP);
9153 } else {
34f80b04 9154 if (!(bp->port.supported &
9155 SUPPORTED_10baseT_Half)) {
9156 DP(NETIF_MSG_LINK,
9157 "10M half not supported\n");
a2fbb9ea 9158 return -EINVAL;
f1410647 9159 }
9160
9161 advertising = (ADVERTISED_10baseT_Half |
9162 ADVERTISED_TP);
9163 }
9164 break;
9165
9166 case SPEED_100:
9167 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9168 if (!(bp->port.supported &
9169 SUPPORTED_100baseT_Full)) {
9170 DP(NETIF_MSG_LINK,
9171 "100M full not supported\n");
a2fbb9ea 9172 return -EINVAL;
f1410647 9173 }
9174
9175 advertising = (ADVERTISED_100baseT_Full |
9176 ADVERTISED_TP);
9177 } else {
34f80b04 9178 if (!(bp->port.supported &
9179 SUPPORTED_100baseT_Half)) {
9180 DP(NETIF_MSG_LINK,
9181 "100M half not supported\n");
a2fbb9ea 9182 return -EINVAL;
f1410647 9183 }
9184
9185 advertising = (ADVERTISED_100baseT_Half |
9186 ADVERTISED_TP);
9187 }
9188 break;
9189
9190 case SPEED_1000:
9191 if (cmd->duplex != DUPLEX_FULL) {
9192 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9193 return -EINVAL;
f1410647 9194 }
a2fbb9ea 9195
34f80b04 9196 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9197 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9198 return -EINVAL;
f1410647 9199 }
9200
9201 advertising = (ADVERTISED_1000baseT_Full |
9202 ADVERTISED_TP);
9203 break;
9204
9205 case SPEED_2500:
9206 if (cmd->duplex != DUPLEX_FULL) {
9207 DP(NETIF_MSG_LINK,
9208 "2.5G half not supported\n");
a2fbb9ea 9209 return -EINVAL;
f1410647 9210 }
a2fbb9ea 9211
34f80b04 9212 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9213 DP(NETIF_MSG_LINK,
9214 "2.5G full not supported\n");
a2fbb9ea 9215 return -EINVAL;
f1410647 9216 }
a2fbb9ea 9217
f1410647 9218 advertising = (ADVERTISED_2500baseX_Full |
9219 ADVERTISED_TP);
9220 break;
9221
9222 case SPEED_10000:
9223 if (cmd->duplex != DUPLEX_FULL) {
9224 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9225 return -EINVAL;
f1410647 9226 }
a2fbb9ea 9227
34f80b04 9228 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9229 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9230 return -EINVAL;
f1410647 9231 }
9232
9233 advertising = (ADVERTISED_10000baseT_Full |
9234 ADVERTISED_FIBRE);
9235 break;
9236
9237 default:
f1410647 9238 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9239 return -EINVAL;
9240 }
9241
9242 bp->link_params.req_line_speed = cmd->speed;
9243 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9244 bp->port.advertising = advertising;
9245 }
9246
c18487ee 9247 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9248 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9249 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9250 bp->port.advertising);
a2fbb9ea 9251
34f80b04 9252 if (netif_running(dev)) {
bb2a0f7a 9253 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9254 bnx2x_link_set(bp);
9255 }
9256
9257 return 0;
9258}
9259
9260#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9261#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9262
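/* The dump covers every register marked online for the running chip
 * (E1 or E1H); lengths are accumulated in 32-bit words, converted to
 * bytes at the end and extended by the dump header.
 */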
9263static int bnx2x_get_regs_len(struct net_device *dev)
9264{
0a64ea57 9265 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9266 int regdump_len = 0;
9267 int i;
9268
9269 if (CHIP_IS_E1(bp)) {
9270 for (i = 0; i < REGS_COUNT; i++)
9271 if (IS_E1_ONLINE(reg_addrs[i].info))
9272 regdump_len += reg_addrs[i].size;
9273
9274 for (i = 0; i < WREGS_COUNT_E1; i++)
9275 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9276 regdump_len += wreg_addrs_e1[i].size *
9277 (1 + wreg_addrs_e1[i].read_regs_count);
9278
9279 } else { /* E1H */
9280 for (i = 0; i < REGS_COUNT; i++)
9281 if (IS_E1H_ONLINE(reg_addrs[i].info))
9282 regdump_len += reg_addrs[i].size;
9283
9284 for (i = 0; i < WREGS_COUNT_E1H; i++)
9285 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9286 regdump_len += wreg_addrs_e1h[i].size *
9287 (1 + wreg_addrs_e1h[i].read_regs_count);
9288 }
9289 regdump_len *= 4;
9290 regdump_len += sizeof(struct dump_hdr);
9291
9292 return regdump_len;
9293}
9294
9295static void bnx2x_get_regs(struct net_device *dev,
9296 struct ethtool_regs *regs, void *_p)
9297{
9298 u32 *p = _p, i, j;
9299 struct bnx2x *bp = netdev_priv(dev);
9300 struct dump_hdr dump_hdr = {0};
9301
9302 regs->version = 0;
9303 memset(p, 0, regs->len);
9304
9305 if (!netif_running(bp->dev))
9306 return;
9307
9308 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9309 dump_hdr.dump_sign = dump_sign_all;
9310 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9311 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9312 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9313 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9314 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9315
9316 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9317 p += dump_hdr.hdr_size + 1;
9318
9319 if (CHIP_IS_E1(bp)) {
9320 for (i = 0; i < REGS_COUNT; i++)
9321 if (IS_E1_ONLINE(reg_addrs[i].info))
9322 for (j = 0; j < reg_addrs[i].size; j++)
9323 *p++ = REG_RD(bp,
9324 reg_addrs[i].addr + j*4);
9325
9326 } else { /* E1H */
9327 for (i = 0; i < REGS_COUNT; i++)
9328 if (IS_E1H_ONLINE(reg_addrs[i].info))
9329 for (j = 0; j < reg_addrs[i].size; j++)
9330 *p++ = REG_RD(bp,
9331 reg_addrs[i].addr + j*4);
9332 }
9333}
9334
9335#define PHY_FW_VER_LEN 10
9336
9337static void bnx2x_get_drvinfo(struct net_device *dev,
9338 struct ethtool_drvinfo *info)
9339{
9340 struct bnx2x *bp = netdev_priv(dev);
9341 u8 phy_fw_ver[PHY_FW_VER_LEN];
9342
9343 strcpy(info->driver, DRV_MODULE_NAME);
9344 strcpy(info->version, DRV_MODULE_VERSION);
9345
9346 phy_fw_ver[0] = '\0';
9347 if (bp->port.pmf) {
9348 bnx2x_acquire_phy_lock(bp);
9349 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9350 (bp->state != BNX2X_STATE_CLOSED),
9351 phy_fw_ver, PHY_FW_VER_LEN);
9352 bnx2x_release_phy_lock(bp);
9353 }
9354
9355 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9356 (bp->common.bc_ver & 0xff0000) >> 16,
9357 (bp->common.bc_ver & 0xff00) >> 8,
9358 (bp->common.bc_ver & 0xff),
9359 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9360 strcpy(info->bus_info, pci_name(bp->pdev));
9361 info->n_stats = BNX2X_NUM_STATS;
9362 info->testinfo_len = BNX2X_NUM_TESTS;
9363 info->eedump_len = bp->common.flash_size;
9364 info->regdump_len = bnx2x_get_regs_len(dev);
9365}
9366
9367static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9368{
9369 struct bnx2x *bp = netdev_priv(dev);
9370
9371 if (bp->flags & NO_WOL_FLAG) {
9372 wol->supported = 0;
9373 wol->wolopts = 0;
9374 } else {
9375 wol->supported = WAKE_MAGIC;
9376 if (bp->wol)
9377 wol->wolopts = WAKE_MAGIC;
9378 else
9379 wol->wolopts = 0;
9380 }
9381 memset(&wol->sopass, 0, sizeof(wol->sopass));
9382}
9383
9384static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9385{
9386 struct bnx2x *bp = netdev_priv(dev);
9387
9388 if (wol->wolopts & ~WAKE_MAGIC)
9389 return -EINVAL;
9390
9391 if (wol->wolopts & WAKE_MAGIC) {
9392 if (bp->flags & NO_WOL_FLAG)
9393 return -EINVAL;
9394
9395 bp->wol = 1;
34f80b04 9396 } else
a2fbb9ea 9397 bp->wol = 0;
34f80b04 9398
9399 return 0;
9400}
9401
9402static u32 bnx2x_get_msglevel(struct net_device *dev)
9403{
9404 struct bnx2x *bp = netdev_priv(dev);
9405
9406 return bp->msglevel;
9407}
9408
9409static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9410{
9411 struct bnx2x *bp = netdev_priv(dev);
9412
9413 if (capable(CAP_NET_ADMIN))
9414 bp->msglevel = level;
9415}
9416
9417static int bnx2x_nway_reset(struct net_device *dev)
9418{
9419 struct bnx2x *bp = netdev_priv(dev);
9420
9421 if (!bp->port.pmf)
9422 return 0;
a2fbb9ea 9423
34f80b04 9424 if (netif_running(dev)) {
bb2a0f7a 9425 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9426 bnx2x_link_set(bp);
9427 }
9428
9429 return 0;
9430}
9431
ab6ad5a4 9432static u32 bnx2x_get_link(struct net_device *dev)
9433{
9434 struct bnx2x *bp = netdev_priv(dev);
9435
9436 return bp->link_vars.link_up;
9437}
9438
9439static int bnx2x_get_eeprom_len(struct net_device *dev)
9440{
9441 struct bnx2x *bp = netdev_priv(dev);
9442
34f80b04 9443 return bp->common.flash_size;
9444}
9445
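/* NVRAM is shared by both ports, so access goes through a hardware
 * arbiter: set this port's request bit and poll until the matching grant
 * bit appears, with the timeout stretched on emulation/FPGA.
 */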
9446static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9447{
34f80b04 9448 int port = BP_PORT(bp);
9449 int count, i;
9450 u32 val = 0;
9451
9452 /* adjust timeout for emulation/FPGA */
9453 count = NVRAM_TIMEOUT_COUNT;
9454 if (CHIP_REV_IS_SLOW(bp))
9455 count *= 100;
9456
9457 /* request access to nvram interface */
9458 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9459 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9460
9461 for (i = 0; i < count*10; i++) {
9462 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9463 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9464 break;
9465
9466 udelay(5);
9467 }
9468
9469 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9470 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9471 return -EBUSY;
9472 }
9473
9474 return 0;
9475}
9476
9477static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9478{
34f80b04 9479 int port = BP_PORT(bp);
9480 int count, i;
9481 u32 val = 0;
9482
9483 /* adjust timeout for emulation/FPGA */
9484 count = NVRAM_TIMEOUT_COUNT;
9485 if (CHIP_REV_IS_SLOW(bp))
9486 count *= 100;
9487
9488 /* relinquish nvram interface */
9489 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9490 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9491
9492 for (i = 0; i < count*10; i++) {
9493 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9494 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9495 break;
9496
9497 udelay(5);
9498 }
9499
9500 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9501 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9502 return -EBUSY;
9503 }
9504
9505 return 0;
9506}
9507
9508static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9509{
9510 u32 val;
9511
9512 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9513
9514 /* enable both bits, even on read */
9515 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9516 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9517 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9518}
9519
9520static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9521{
9522 u32 val;
9523
9524 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9525
9526 /* disable both bits, even after read */
9527 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9528 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9529 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9530}
9531
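/* Single-dword NVRAM read: clear the DONE bit, program the address, issue
 * the DOIT command and poll for DONE. The FIRST/LAST bits in cmd_flags
 * delimit a multi-dword burst.
 */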
4781bfad 9532static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9533 u32 cmd_flags)
9534{
f1410647 9535 int count, i, rc;
9536 u32 val;
9537
9538 /* build the command word */
9539 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9540
9541 /* need to clear DONE bit separately */
9542 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9543
9544 /* address of the NVRAM to read from */
9545 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9546 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9547
9548 /* issue a read command */
9549 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9550
9551 /* adjust timeout for emulation/FPGA */
9552 count = NVRAM_TIMEOUT_COUNT;
9553 if (CHIP_REV_IS_SLOW(bp))
9554 count *= 100;
9555
9556 /* wait for completion */
9557 *ret_val = 0;
9558 rc = -EBUSY;
9559 for (i = 0; i < count; i++) {
9560 udelay(5);
9561 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9562
9563 if (val & MCPR_NVM_COMMAND_DONE) {
9564 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9565 /* we read nvram data in cpu order,
9566 * but ethtool sees it as an array of bytes;
9567 * converting to big-endian does the work */
4781bfad 9568 *ret_val = cpu_to_be32(val);
9569 rc = 0;
9570 break;
9571 }
9572 }
9573
9574 return rc;
9575}
9576
9577static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9578 int buf_size)
9579{
9580 int rc;
9581 u32 cmd_flags;
4781bfad 9582 __be32 val;
9583
9584 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9585 DP(BNX2X_MSG_NVM,
c14423fe 9586 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9587 offset, buf_size);
9588 return -EINVAL;
9589 }
9590
9591 if (offset + buf_size > bp->common.flash_size) {
9592 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9593 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9594 offset, buf_size, bp->common.flash_size);
9595 return -EINVAL;
9596 }
9597
9598 /* request access to nvram interface */
9599 rc = bnx2x_acquire_nvram_lock(bp);
9600 if (rc)
9601 return rc;
9602
9603 /* enable access to nvram interface */
9604 bnx2x_enable_nvram_access(bp);
9605
9606 /* read the first word(s) */
9607 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9608 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9609 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9610 memcpy(ret_buf, &val, 4);
9611
9612 /* advance to the next dword */
9613 offset += sizeof(u32);
9614 ret_buf += sizeof(u32);
9615 buf_size -= sizeof(u32);
9616 cmd_flags = 0;
9617 }
9618
9619 if (rc == 0) {
9620 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9621 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9622 memcpy(ret_buf, &val, 4);
9623 }
9624
9625 /* disable access to nvram interface */
9626 bnx2x_disable_nvram_access(bp);
9627 bnx2x_release_nvram_lock(bp);
9628
9629 return rc;
9630}
9631
9632static int bnx2x_get_eeprom(struct net_device *dev,
9633 struct ethtool_eeprom *eeprom, u8 *eebuf)
9634{
9635 struct bnx2x *bp = netdev_priv(dev);
9636 int rc;
9637
9638 if (!netif_running(dev))
9639 return -EAGAIN;
9640
34f80b04 9641 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9642 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9643 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9644 eeprom->len, eeprom->len);
9645
9646 /* parameters already validated in ethtool_get_eeprom */
9647
9648 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9649
9650 return rc;
9651}
9652
9653static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9654 u32 cmd_flags)
9655{
f1410647 9656 int count, i, rc;
9657
9658 /* build the command word */
9659 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9660
9661 /* need to clear DONE bit separately */
9662 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9663
9664 /* write the data */
9665 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9666
9667 /* address of the NVRAM to write to */
9668 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9669 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9670
9671 /* issue the write command */
9672 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9673
9674 /* adjust timeout for emulation/FPGA */
9675 count = NVRAM_TIMEOUT_COUNT;
9676 if (CHIP_REV_IS_SLOW(bp))
9677 count *= 100;
9678
9679 /* wait for completion */
9680 rc = -EBUSY;
9681 for (i = 0; i < count; i++) {
9682 udelay(5);
9683 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9684 if (val & MCPR_NVM_COMMAND_DONE) {
9685 rc = 0;
9686 break;
9687 }
9688 }
9689
9690 return rc;
9691}
9692
f1410647 9693#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
9694
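/* Byte-granular NVRAM write, implemented as a read-modify-write of the
 * aligned dword that contains the byte: read the dword, splice the new
 * byte in at BYTE_OFFSET(offset) and write the dword back.
 */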
9695static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9696 int buf_size)
9697{
9698 int rc;
9699 u32 cmd_flags;
9700 u32 align_offset;
4781bfad 9701 __be32 val;
a2fbb9ea 9702
9703 if (offset + buf_size > bp->common.flash_size) {
9704 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9705 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9706 offset, buf_size, bp->common.flash_size);
9707 return -EINVAL;
9708 }
9709
9710 /* request access to nvram interface */
9711 rc = bnx2x_acquire_nvram_lock(bp);
9712 if (rc)
9713 return rc;
9714
9715 /* enable access to nvram interface */
9716 bnx2x_enable_nvram_access(bp);
9717
9718 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9719 align_offset = (offset & ~0x03);
9720 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9721
9722 if (rc == 0) {
9723 val &= ~(0xff << BYTE_OFFSET(offset));
9724 val |= (*data_buf << BYTE_OFFSET(offset));
9725
9726 /* nvram data is returned as an array of bytes;
9727 * convert it back to cpu order */
9728 val = be32_to_cpu(val);
9729
9730 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9731 cmd_flags);
9732 }
9733
9734 /* disable access to nvram interface */
9735 bnx2x_disable_nvram_access(bp);
9736 bnx2x_release_nvram_lock(bp);
9737
9738 return rc;
9739}
9740
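/* Multi-dword NVRAM write: FIRST opens a burst and LAST closes it, and
 * both are re-issued whenever the write crosses an NVRAM page boundary,
 * since a burst may not span pages.
 */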
9741static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9742 int buf_size)
9743{
9744 int rc;
9745 u32 cmd_flags;
9746 u32 val;
9747 u32 written_so_far;
9748
34f80b04 9749 if (buf_size == 1) /* ethtool */
a2fbb9ea 9750 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9751
9752 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9753 DP(BNX2X_MSG_NVM,
c14423fe 9754 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9755 offset, buf_size);
9756 return -EINVAL;
9757 }
9758
9759 if (offset + buf_size > bp->common.flash_size) {
9760 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9761 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9762 offset, buf_size, bp->common.flash_size);
9763 return -EINVAL;
9764 }
9765
9766 /* request access to nvram interface */
9767 rc = bnx2x_acquire_nvram_lock(bp);
9768 if (rc)
9769 return rc;
9770
9771 /* enable access to nvram interface */
9772 bnx2x_enable_nvram_access(bp);
9773
9774 written_so_far = 0;
9775 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9776 while ((written_so_far < buf_size) && (rc == 0)) {
9777 if (written_so_far == (buf_size - sizeof(u32)))
9778 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9779 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9780 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9781 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9782 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9783
9784 memcpy(&val, data_buf, 4);
9785
9786 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9787
9788 /* advance to the next dword */
9789 offset += sizeof(u32);
9790 data_buf += sizeof(u32);
9791 written_so_far += sizeof(u32);
9792 cmd_flags = 0;
9793 }
9794
9795 /* disable access to nvram interface */
9796 bnx2x_disable_nvram_access(bp);
9797 bnx2x_release_nvram_lock(bp);
9798
9799 return rc;
9800}
9801
9802static int bnx2x_set_eeprom(struct net_device *dev,
9803 struct ethtool_eeprom *eeprom, u8 *eebuf)
9804{
9805 struct bnx2x *bp = netdev_priv(dev);
9806 int port = BP_PORT(bp);
9807 int rc = 0;
a2fbb9ea 9808
9809 if (!netif_running(dev))
9810 return -EAGAIN;
9811
34f80b04 9812 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9813 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9814 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9815 eeprom->len, eeprom->len);
9816
9817 /* parameters already validated in ethtool_set_eeprom */
9818
9819 /* PHY eeprom can be accessed only by the PMF */
9820 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9821 !bp->port.pmf)
9822 return -EINVAL;
9823
9824 if (eeprom->magic == 0x50485950) {
9825 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9826 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9827
9828 bnx2x_acquire_phy_lock(bp);
9829 rc |= bnx2x_link_reset(&bp->link_params,
9830 &bp->link_vars, 0);
9831 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9832 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9833 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9834 MISC_REGISTERS_GPIO_HIGH, port);
9835 bnx2x_release_phy_lock(bp);
9836 bnx2x_link_report(bp);
9837
9838 } else if (eeprom->magic == 0x50485952) {
9839 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9840 if ((bp->state == BNX2X_STATE_OPEN) ||
9841 (bp->state == BNX2X_STATE_DISABLED)) {
4a37fb66 9842 bnx2x_acquire_phy_lock(bp);
9843 rc |= bnx2x_link_reset(&bp->link_params,
9844 &bp->link_vars, 1);
9845
9846 rc |= bnx2x_phy_init(&bp->link_params,
9847 &bp->link_vars);
4a37fb66 9848 bnx2x_release_phy_lock(bp);
9849 bnx2x_calc_fc_adv(bp);
9850 }
9851 } else if (eeprom->magic == 0x53985943) {
9852 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9853 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9854 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9855 u8 ext_phy_addr =
659bc5c4 9856 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9857
9858 /* DSP Remove Download Mode */
9859 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9860 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9861
9862 bnx2x_acquire_phy_lock(bp);
9863
9864 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9865
9866 /* wait 0.5 sec to allow it to run */
9867 msleep(500);
9868 bnx2x_ext_phy_hw_reset(bp, port);
9869 msleep(500);
9870 bnx2x_release_phy_lock(bp);
9871 }
9872 } else
c18487ee 9873 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9874
9875 return rc;
9876}
9877
9878static int bnx2x_get_coalesce(struct net_device *dev,
9879 struct ethtool_coalesce *coal)
9880{
9881 struct bnx2x *bp = netdev_priv(dev);
9882
9883 memset(coal, 0, sizeof(struct ethtool_coalesce));
9884
9885 coal->rx_coalesce_usecs = bp->rx_ticks;
9886 coal->tx_coalesce_usecs = bp->tx_ticks;
9887
9888 return 0;
9889}
9890
ca00392c 9891#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9892static int bnx2x_set_coalesce(struct net_device *dev,
9893 struct ethtool_coalesce *coal)
9894{
9895 struct bnx2x *bp = netdev_priv(dev);
9896
9897 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9898 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9899 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9900
9901 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9902 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9903 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9904
34f80b04 9905 if (netif_running(dev))
9906 bnx2x_update_coalesce(bp);
9907
9908 return 0;
9909}
9910
9911static void bnx2x_get_ringparam(struct net_device *dev,
9912 struct ethtool_ringparam *ering)
9913{
9914 struct bnx2x *bp = netdev_priv(dev);
9915
9916 ering->rx_max_pending = MAX_RX_AVAIL;
9917 ering->rx_mini_max_pending = 0;
9918 ering->rx_jumbo_max_pending = 0;
9919
9920 ering->rx_pending = bp->rx_ring_size;
9921 ering->rx_mini_pending = 0;
9922 ering->rx_jumbo_pending = 0;
9923
9924 ering->tx_max_pending = MAX_TX_AVAIL;
9925 ering->tx_pending = bp->tx_ring_size;
9926}
9927
9928static int bnx2x_set_ringparam(struct net_device *dev,
9929 struct ethtool_ringparam *ering)
9930{
9931 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9932 int rc = 0;
9933
9934 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9935 (ering->tx_pending > MAX_TX_AVAIL) ||
9936 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9937 return -EINVAL;
9938
9939 bp->rx_ring_size = ering->rx_pending;
9940 bp->tx_ring_size = ering->tx_pending;
9941
9942 if (netif_running(dev)) {
9943 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9944 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9945 }
9946
34f80b04 9947 return rc;
9948}
9949
9950static void bnx2x_get_pauseparam(struct net_device *dev,
9951 struct ethtool_pauseparam *epause)
9952{
9953 struct bnx2x *bp = netdev_priv(dev);
9954
9955 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9956 BNX2X_FLOW_CTRL_AUTO) &&
9957 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9958
9959 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9960 BNX2X_FLOW_CTRL_RX);
9961 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9962 BNX2X_FLOW_CTRL_TX);
9963
9964 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9965 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9966 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9967}
9968
9969static int bnx2x_set_pauseparam(struct net_device *dev,
9970 struct ethtool_pauseparam *epause)
9971{
9972 struct bnx2x *bp = netdev_priv(dev);
9973
9974 if (IS_E1HMF(bp))
9975 return 0;
9976
9977 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9978 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9979 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9980
c0700f90 9981 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9982
f1410647 9983 if (epause->rx_pause)
c0700f90 9984 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9985
f1410647 9986 if (epause->tx_pause)
c0700f90 9987 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9988
9989 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9990 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9991
c18487ee 9992 if (epause->autoneg) {
34f80b04 9993 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9994 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9995 return -EINVAL;
9996 }
a2fbb9ea 9997
c18487ee 9998 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9999 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 10000 }
a2fbb9ea 10001
10002 DP(NETIF_MSG_LINK,
10003 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10004
10005 if (netif_running(dev)) {
bb2a0f7a 10006 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10007 bnx2x_link_set(bp);
10008 }
10009
10010 return 0;
10011}
10012
10013static int bnx2x_set_flags(struct net_device *dev, u32 data)
10014{
10015 struct bnx2x *bp = netdev_priv(dev);
10016 int changed = 0;
10017 int rc = 0;
10018
10019 /* TPA requires Rx CSUM offloading */
10020 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10021 if (!(dev->features & NETIF_F_LRO)) {
10022 dev->features |= NETIF_F_LRO;
10023 bp->flags |= TPA_ENABLE_FLAG;
10024 changed = 1;
10025 }
10026
10027 } else if (dev->features & NETIF_F_LRO) {
10028 dev->features &= ~NETIF_F_LRO;
10029 bp->flags &= ~TPA_ENABLE_FLAG;
10030 changed = 1;
10031 }
10032
10033 if (changed && netif_running(dev)) {
10034 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10035 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10036 }
10037
10038 return rc;
10039}
10040
10041static u32 bnx2x_get_rx_csum(struct net_device *dev)
10042{
10043 struct bnx2x *bp = netdev_priv(dev);
10044
10045 return bp->rx_csum;
10046}
10047
10048static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10049{
10050 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10051 int rc = 0;
10052
10053 bp->rx_csum = data;
10054
10055 /* Disable TPA when Rx CSUM is disabled; otherwise all
10056 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10057 if (!data) {
10058 u32 flags = ethtool_op_get_flags(dev);
10059
10060 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10061 }
10062
10063 return rc;
10064}
10065
10066static int bnx2x_set_tso(struct net_device *dev, u32 data)
10067{
755735eb 10068 if (data) {
a2fbb9ea 10069 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10070 dev->features |= NETIF_F_TSO6;
10071 } else {
a2fbb9ea 10072 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10073 dev->features &= ~NETIF_F_TSO6;
10074 }
10075
10076 return 0;
10077}
10078
f3c87cdd 10079static const struct {
10080 char string[ETH_GSTRING_LEN];
10081} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10082 { "register_test (offline)" },
10083 { "memory_test (offline)" },
10084 { "loopback_test (offline)" },
10085 { "nvram_test (online)" },
10086 { "interrupt_test (online)" },
10087 { "link_test (online)" },
d3d4f495 10088 { "idle check (online)" }
10089};
10090
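/* Walking 0/1 register test: for each table entry write the pattern
 * through the per-port offset, read it back, restore the original value
 * and fail if any bit covered by the mask did not stick.
 */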
10091static int bnx2x_test_registers(struct bnx2x *bp)
10092{
10093 int idx, i, rc = -ENODEV;
10094 u32 wr_val = 0;
9dabc424 10095 int port = BP_PORT(bp);
10096 static const struct {
10097 u32 offset0;
10098 u32 offset1;
10099 u32 mask;
10100 } reg_tbl[] = {
10101/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10102 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10103 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10104 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10105 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10106 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10107 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10108 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10109 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10110 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10111/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10112 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10113 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10114 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10115 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10116 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10117 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10118 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10119 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
10120 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10121/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
10122 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10123 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10124 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10125 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10126 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10127 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10128 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10129 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
10130 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10131/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
10132 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10133 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10134 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10135 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10136 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10137 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10138
10139 { 0xffffffff, 0, 0x00000000 }
10140 };
10141
10142 if (!netif_running(bp->dev))
10143 return rc;
10144
10145 /* Run the test twice:
10146 first by writing 0x00000000, then by writing 0xffffffff */
10147 for (idx = 0; idx < 2; idx++) {
10148
10149 switch (idx) {
10150 case 0:
10151 wr_val = 0;
10152 break;
10153 case 1:
10154 wr_val = 0xffffffff;
10155 break;
10156 }
10157
10158 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10159 u32 offset, mask, save_val, val;
10160
10161 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10162 mask = reg_tbl[i].mask;
10163
10164 save_val = REG_RD(bp, offset);
10165
10166 REG_WR(bp, offset, wr_val);
10167 val = REG_RD(bp, offset);
10168
10169 /* Restore the original register's value */
10170 REG_WR(bp, offset, save_val);
10171
10172 /* verify that value is as expected value */
10173 if ((val & mask) != (wr_val & mask))
10174 goto test_reg_exit;
10175 }
10176 }
10177
10178 rc = 0;
10179
10180test_reg_exit:
10181 return rc;
10182}
10183
static int bnx2x_test_memory(struct bnx2x *bp)
{
    int i, j, rc = -ENODEV;
    u32 val;
    static const struct {
        u32 offset;
        int size;
    } mem_tbl[] = {
        { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
        { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
        { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
        { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
        { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
        { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
        { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

        { 0xffffffff, 0 }
    };
    static const struct {
        char *name;
        u32 offset;
        u32 e1_mask;
        u32 e1h_mask;
    } prty_tbl[] = {
        { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
        { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
        { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
        { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
        { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
        { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

        { NULL, 0xffffffff, 0, 0 }
    };
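    /* e1_mask/e1h_mask above list the parity-status bits that are
     * expected (and therefore ignored) on E1 and E1H respectively;
     * any bit set outside the chip's mask fails the test below.
     */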

    if (!netif_running(bp->dev))
        return rc;

    /* Go through all the memories */
    for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
        for (j = 0; j < mem_tbl[i].size; j++)
            REG_RD(bp, mem_tbl[i].offset + j*4);

    /* Check the parity status */
    for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
        val = REG_RD(bp, prty_tbl[i].offset);
        if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
            (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
            DP(NETIF_MSG_HW,
               "%s is 0x%x\n", prty_tbl[i].name, val);
            goto test_mem_exit;
        }
    }

    rc = 0;

test_mem_exit:
    return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
    int cnt = 1000;

    if (link_up)
        while (bnx2x_link_test(bp) && cnt--)
            msleep(10);
}
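/* (bnx2x_wait_for_link() polls bnx2x_link_test() every 10 ms, giving
 * the link roughly 10 seconds - 1000 * 10 ms - to come back up.) */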

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
    unsigned int pkt_size, num_pkts, i;
    struct sk_buff *skb;
    unsigned char *packet;
    struct bnx2x_fastpath *fp_rx = &bp->fp[0];
    struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
    u16 tx_start_idx, tx_idx;
    u16 rx_start_idx, rx_idx;
    u16 pkt_prod, bd_prod;
    struct sw_tx_bd *tx_buf;
    struct eth_tx_start_bd *tx_start_bd;
    struct eth_tx_parse_bd *pbd = NULL;
    dma_addr_t mapping;
    union eth_rx_cqe *cqe;
    u8 cqe_fp_flags;
    struct sw_rx_bd *rx_buf;
    u16 len;
    int rc = -ENODEV;

    /* check the loopback mode */
    switch (loopback_mode) {
    case BNX2X_PHY_LOOPBACK:
        if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
            return -EINVAL;
        break;
    case BNX2X_MAC_LOOPBACK:
        bp->link_params.loopback_mode = LOOPBACK_BMAC;
        bnx2x_phy_init(&bp->link_params, &bp->link_vars);
        break;
    default:
        return -EINVAL;
    }

    /* prepare the loopback packet */
    pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
                 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
    skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
    if (!skb) {
        rc = -ENOMEM;
        goto test_loopback_exit;
    }
    packet = skb_put(skb, pkt_size);
    memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
    memset(packet + ETH_ALEN, 0, ETH_ALEN);
    memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
    for (i = ETH_HLEN; i < pkt_size; i++)
        packet[i] = (unsigned char) (i & 0xff);
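    /* The test frame is therefore: dst = our own MAC, src = all zeros,
     * remaining header bytes = 0x77, payload = an (i & 0xff) ramp that
     * the receive side below verifies byte by byte.
     */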

    /* send the loopback packet */
    num_pkts = 0;
    tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
    rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

    pkt_prod = fp_tx->tx_pkt_prod++;
    tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
    tx_buf->first_bd = fp_tx->tx_bd_prod;
    tx_buf->skb = skb;
    tx_buf->flags = 0;

    bd_prod = TX_BD(fp_tx->tx_bd_prod);
    tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
    mapping = pci_map_single(bp->pdev, skb->data,
                             skb_headlen(skb), PCI_DMA_TODEVICE);
    tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
    tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
    tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
    tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
    tx_start_bd->vlan = cpu_to_le16(pkt_prod);
    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
    tx_start_bd->general_data = ((UNICAST_ADDRESS <<
                                  ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

    /* turn on parsing and get a BD */
    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
    pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

    memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

    wmb();

    fp_tx->tx_db.data.prod += 2;
    barrier();
    DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

    mmiowb();

    num_pkts++;
    fp_tx->tx_bd_prod += 2; /* start + pbd */
    bp->dev->trans_start = jiffies;

    udelay(100);

    tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
    if (tx_idx != tx_start_idx + num_pkts)
        goto test_loopback_exit;

    rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
    if (rx_idx != rx_start_idx + num_pkts)
        goto test_loopback_exit;

    cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
    cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
    /* ("FALGS" is the spelling used by the firmware HSI header) */
    if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
        goto test_loopback_rx_exit;

    len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
    if (len != pkt_size)
        goto test_loopback_rx_exit;

    rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
    skb = rx_buf->skb;
    skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
    for (i = ETH_HLEN; i < pkt_size; i++)
        if (*(skb->data + i) != (unsigned char) (i & 0xff))
            goto test_loopback_rx_exit;

    rc = 0;

test_loopback_rx_exit:

    fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
    fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
    fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
    fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

    /* Update producers */
    bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
                         fp_rx->rx_sge_prod);

test_loopback_exit:
    bp->link_params.loopback_mode = LOOPBACK_NONE;

    return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
    int rc = 0, res;

    if (!netif_running(bp->dev))
        return BNX2X_LOOPBACK_FAILED;

    bnx2x_netif_stop(bp, 1);
    bnx2x_acquire_phy_lock(bp);

    res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
    if (res) {
        DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
        rc |= BNX2X_PHY_LOOPBACK_FAILED;
    }

    res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
    if (res) {
        DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
        rc |= BNX2X_MAC_LOOPBACK_FAILED;
    }

    bnx2x_release_phy_lock(bp);
    bnx2x_netif_start(bp);

    return rc;
}

#define CRC32_RESIDUAL 0xdebb20e3
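/* CRC32_RESIDUAL is the constant remainder a little-endian CRC-32
 * leaves when run over a block that includes its own appended
 * (inverted) CRC, so each NVRAM region below can be validated without
 * knowing where inside it the CRC is stored. */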

static int bnx2x_test_nvram(struct bnx2x *bp)
{
    static const struct {
        int offset;
        int size;
    } nvram_tbl[] = {
        {     0,  0x14 }, /* bootstrap */
        {  0x14,  0xec }, /* dir */
        { 0x100, 0x350 }, /* manuf_info */
        { 0x450,  0xf0 }, /* feature_info */
        { 0x640,  0x64 }, /* upgrade_key_info */
        { 0x6a4,  0x64 },
        { 0x708,  0x70 }, /* manuf_key_info */
        { 0x778,  0x70 },
        {     0,     0 }
    };
    __be32 buf[0x350 / 4];
    u8 *data = (u8 *)buf;
    int i, rc;
    u32 magic, crc;

    rc = bnx2x_nvram_read(bp, 0, data, 4);
    if (rc) {
        DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
        goto test_nvram_exit;
    }

    magic = be32_to_cpu(buf[0]);
    if (magic != 0x669955aa) {
        DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
        rc = -ENODEV;
        goto test_nvram_exit;
    }

    for (i = 0; nvram_tbl[i].size; i++) {

        rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
                              nvram_tbl[i].size);
        if (rc) {
            DP(NETIF_MSG_PROBE,
               "nvram_tbl[%d] read data (rc %d)\n", i, rc);
            goto test_nvram_exit;
        }

        crc = ether_crc_le(nvram_tbl[i].size, data);
        if (crc != CRC32_RESIDUAL) {
            DP(NETIF_MSG_PROBE,
               "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
            rc = -ENODEV;
            goto test_nvram_exit;
        }
    }

test_nvram_exit:
    return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
    struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
    int i, rc;

    if (!netif_running(bp->dev))
        return -ENODEV;

    config->hdr.length = 0;
    if (CHIP_IS_E1(bp))
        config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
    else
        config->hdr.offset = BP_FUNC(bp);
    config->hdr.client_id = bp->fp->cl_id;
    config->hdr.reserved1 = 0;

    bp->set_mac_pending++;
    smp_wmb();
    rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
    if (rc == 0) {
        for (i = 0; i < 10; i++) {
            if (!bp->set_mac_pending)
                break;
            smp_rmb();
            msleep_interruptible(10);
        }
        if (i == 10)
            rc = -ENODEV;
    }

    return rc;
}

static void bnx2x_self_test(struct net_device *dev,
                            struct ethtool_test *etest, u64 *buf)
{
    struct bnx2x *bp = netdev_priv(dev);

    memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

    if (!netif_running(dev))
        return;

    /* offline tests are not supported in MF mode */
    if (IS_E1HMF(bp))
        etest->flags &= ~ETH_TEST_FL_OFFLINE;

    if (etest->flags & ETH_TEST_FL_OFFLINE) {
        int port = BP_PORT(bp);
        u32 val;
        u8 link_up;

        /* save current value of input enable for TX port IF */
        val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
        /* disable input for TX port IF */
        REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

        link_up = bp->link_vars.link_up;
        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        bnx2x_nic_load(bp, LOAD_DIAG);
        /* wait until link state is restored */
        bnx2x_wait_for_link(bp, link_up);

        if (bnx2x_test_registers(bp) != 0) {
            buf[0] = 1;
            etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2x_test_memory(bp) != 0) {
            buf[1] = 1;
            etest->flags |= ETH_TEST_FL_FAILED;
        }
        buf[2] = bnx2x_test_loopback(bp, link_up);
        if (buf[2] != 0)
            etest->flags |= ETH_TEST_FL_FAILED;

        bnx2x_nic_unload(bp, UNLOAD_NORMAL);

        /* restore input for TX port IF */
        REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

        bnx2x_nic_load(bp, LOAD_NORMAL);
        /* wait until link state is restored */
        bnx2x_wait_for_link(bp, link_up);
    }
    if (bnx2x_test_nvram(bp) != 0) {
        buf[3] = 1;
        etest->flags |= ETH_TEST_FL_FAILED;
    }
    if (bnx2x_test_intr(bp) != 0) {
        buf[4] = 1;
        etest->flags |= ETH_TEST_FL_FAILED;
    }
    if (bp->port.pmf)
        if (bnx2x_link_test(bp) != 0) {
            buf[5] = 1;
            etest->flags |= ETH_TEST_FL_FAILED;
        }

#ifdef BNX2X_EXTRA_DEBUG
    bnx2x_panic_dump(bp);
#endif
}

static const struct {
    long offset;
    int size;
    u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
    { Q_STATS_OFFSET32(error_bytes_received_hi),
                        8, "[%d]: rx_error_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
                        8, "[%d]: rx_ucast_packets" },
    { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
                        8, "[%d]: rx_mcast_packets" },
    { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
                        8, "[%d]: rx_bcast_packets" },
    { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
    { Q_STATS_OFFSET32(rx_err_discard_pkt),
                        4, "[%d]: rx_phy_ip_err_discards"},
    { Q_STATS_OFFSET32(rx_skb_alloc_failed),
                        4, "[%d]: rx_skb_alloc_discard" },
    { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
    { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                        8, "[%d]: tx_packets" }
};

static const struct {
    long offset;
    int size;
    u32 flags;
#define STATS_FLAGS_PORT    1
#define STATS_FLAGS_FUNC    2
#define STATS_FLAGS_BOTH    (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
    u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
                8, STATS_FLAGS_BOTH, "rx_bytes" },
    { STATS_OFFSET32(error_bytes_received_hi),
                8, STATS_FLAGS_BOTH, "rx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
    { STATS_OFFSET32(total_multicast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
    { STATS_OFFSET32(total_broadcast_packets_received_hi),
                8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
    { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
                8, STATS_FLAGS_PORT, "rx_crc_errors" },
    { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
                8, STATS_FLAGS_PORT, "rx_align_errors" },
    { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
                8, STATS_FLAGS_PORT, "rx_undersize_packets" },
    { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
                8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
                8, STATS_FLAGS_PORT, "rx_fragments" },
    { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
                8, STATS_FLAGS_PORT, "rx_jabbers" },
    { STATS_OFFSET32(no_buff_discard_hi),
                8, STATS_FLAGS_BOTH, "rx_discards" },
    { STATS_OFFSET32(mac_filter_discard),
                4, STATS_FLAGS_PORT, "rx_filtered_packets" },
    { STATS_OFFSET32(xxoverflow_discard),
                4, STATS_FLAGS_PORT, "rx_fw_discards" },
    { STATS_OFFSET32(brb_drop_hi),
                8, STATS_FLAGS_PORT, "rx_brb_discard" },
    { STATS_OFFSET32(brb_truncate_hi),
                8, STATS_FLAGS_PORT, "rx_brb_truncate" },
    { STATS_OFFSET32(pause_frames_received_hi),
                8, STATS_FLAGS_PORT, "rx_pause_frames" },
    { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
                8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
    { STATS_OFFSET32(nig_timer_max),
                4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
                4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
    { STATS_OFFSET32(rx_skb_alloc_failed),
                4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
    { STATS_OFFSET32(hw_csum_err),
                4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

    { STATS_OFFSET32(total_bytes_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_bytes" },
    { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
                8, STATS_FLAGS_PORT, "tx_error_bytes" },
    { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
                8, STATS_FLAGS_BOTH, "tx_packets" },
    { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
                8, STATS_FLAGS_PORT, "tx_mac_errors" },
    { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
                8, STATS_FLAGS_PORT, "tx_carrier_errors" },
    { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
                8, STATS_FLAGS_PORT, "tx_single_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
                8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
                8, STATS_FLAGS_PORT, "tx_deferred" },
    { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
                8, STATS_FLAGS_PORT, "tx_excess_collisions" },
    { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
                8, STATS_FLAGS_PORT, "tx_late_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
                8, STATS_FLAGS_PORT, "tx_total_collisions" },
    { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
                8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
                8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
                8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
                8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
    { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
                8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
    { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
                8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
                8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
    { STATS_OFFSET32(pause_frames_sent_hi),
                8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
    ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)     (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
    (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
    struct bnx2x *bp = netdev_priv(dev);
    int i, num_stats;

    switch (stringset) {
    case ETH_SS_STATS:
        if (is_multi(bp)) {
            num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
            if (!IS_E1HMF_MODE_STAT(bp))
                num_stats += BNX2X_NUM_STATS;
        } else {
            if (IS_E1HMF_MODE_STAT(bp)) {
                num_stats = 0;
                for (i = 0; i < BNX2X_NUM_STATS; i++)
                    if (IS_FUNC_STAT(i))
                        num_stats++;
            } else
                num_stats = BNX2X_NUM_STATS;
        }
        return num_stats;

    case ETH_SS_TEST:
        return BNX2X_NUM_TESTS;

    default:
        return -EINVAL;
    }
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
    struct bnx2x *bp = netdev_priv(dev);
    int i, j, k;

    switch (stringset) {
    case ETH_SS_STATS:
        if (is_multi(bp)) {
            k = 0;
            for_each_rx_queue(bp, i) {
                for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
                    sprintf(buf + (k + j)*ETH_GSTRING_LEN,
                            bnx2x_q_stats_arr[j].string, i);
                k += BNX2X_NUM_Q_STATS;
            }
            if (IS_E1HMF_MODE_STAT(bp))
                break;
            for (j = 0; j < BNX2X_NUM_STATS; j++)
                strcpy(buf + (k + j)*ETH_GSTRING_LEN,
                       bnx2x_stats_arr[j].string);
        } else {
            for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
                if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
                    continue;
                strcpy(buf + j*ETH_GSTRING_LEN,
                       bnx2x_stats_arr[i].string);
                j++;
            }
        }
        break;

    case ETH_SS_TEST:
        memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
        break;
    }
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
                                    struct ethtool_stats *stats, u64 *buf)
{
    struct bnx2x *bp = netdev_priv(dev);
    u32 *hw_stats, *offset;
    int i, j, k;

    if (is_multi(bp)) {
        k = 0;
        for_each_rx_queue(bp, i) {
            hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
            for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
                if (bnx2x_q_stats_arr[j].size == 0) {
                    /* skip this counter */
                    buf[k + j] = 0;
                    continue;
                }
                offset = (hw_stats +
                          bnx2x_q_stats_arr[j].offset);
                if (bnx2x_q_stats_arr[j].size == 4) {
                    /* 4-byte counter */
                    buf[k + j] = (u64) *offset;
                    continue;
                }
                /* 8-byte counter */
                buf[k + j] = HILO_U64(*offset, *(offset + 1));
            }
            k += BNX2X_NUM_Q_STATS;
        }
        if (IS_E1HMF_MODE_STAT(bp))
            return;
        hw_stats = (u32 *)&bp->eth_stats;
        for (j = 0; j < BNX2X_NUM_STATS; j++) {
            if (bnx2x_stats_arr[j].size == 0) {
                /* skip this counter */
                buf[k + j] = 0;
                continue;
            }
            offset = (hw_stats + bnx2x_stats_arr[j].offset);
            if (bnx2x_stats_arr[j].size == 4) {
                /* 4-byte counter */
                buf[k + j] = (u64) *offset;
                continue;
            }
            /* 8-byte counter */
            buf[k + j] = HILO_U64(*offset, *(offset + 1));
        }
    } else {
        hw_stats = (u32 *)&bp->eth_stats;
        for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
            if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
                continue;
            if (bnx2x_stats_arr[i].size == 0) {
                /* skip this counter */
                buf[j] = 0;
                j++;
                continue;
            }
            offset = (hw_stats + bnx2x_stats_arr[i].offset);
            if (bnx2x_stats_arr[i].size == 4) {
                /* 4-byte counter */
                buf[j] = (u64) *offset;
                j++;
                continue;
            }
            /* 8-byte counter */
            buf[j] = HILO_U64(*offset, *(offset + 1));
            j++;
        }
    }
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
    struct bnx2x *bp = netdev_priv(dev);
    int port = BP_PORT(bp);
    int i;

    if (!netif_running(dev))
        return 0;

    if (!bp->port.pmf)
        return 0;

    if (data == 0)
        data = 2;

    for (i = 0; i < (data * 2); i++) {
        if ((i % 2) == 0)
            bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
                          bp->link_params.hw_led_mode,
                          bp->link_params.chip_id);
        else
            bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
                          bp->link_params.hw_led_mode,
                          bp->link_params.chip_id);

        msleep_interruptible(500);
        if (signal_pending(current))
            break;
    }

    if (bp->link_vars.link_up)
        bnx2x_set_led(bp, port, LED_MODE_OPER,
                      bp->link_vars.line_speed,
                      bp->link_params.hw_led_mode,
                      bp->link_params.chip_id);

    return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
    .get_settings       = bnx2x_get_settings,
    .set_settings       = bnx2x_set_settings,
    .get_drvinfo        = bnx2x_get_drvinfo,
    .get_regs_len       = bnx2x_get_regs_len,
    .get_regs           = bnx2x_get_regs,
    .get_wol            = bnx2x_get_wol,
    .set_wol            = bnx2x_set_wol,
    .get_msglevel       = bnx2x_get_msglevel,
    .set_msglevel       = bnx2x_set_msglevel,
    .nway_reset         = bnx2x_nway_reset,
    .get_link           = bnx2x_get_link,
    .get_eeprom_len     = bnx2x_get_eeprom_len,
    .get_eeprom         = bnx2x_get_eeprom,
    .set_eeprom         = bnx2x_set_eeprom,
    .get_coalesce       = bnx2x_get_coalesce,
    .set_coalesce       = bnx2x_set_coalesce,
    .get_ringparam      = bnx2x_get_ringparam,
    .set_ringparam      = bnx2x_set_ringparam,
    .get_pauseparam     = bnx2x_get_pauseparam,
    .set_pauseparam     = bnx2x_set_pauseparam,
    .get_rx_csum        = bnx2x_get_rx_csum,
    .set_rx_csum        = bnx2x_set_rx_csum,
    .get_tx_csum        = ethtool_op_get_tx_csum,
    .set_tx_csum        = ethtool_op_set_tx_hw_csum,
    .set_flags          = bnx2x_set_flags,
    .get_flags          = ethtool_op_get_flags,
    .get_sg             = ethtool_op_get_sg,
    .set_sg             = ethtool_op_set_sg,
    .get_tso            = ethtool_op_get_tso,
    .set_tso            = bnx2x_set_tso,
    .self_test          = bnx2x_self_test,
    .get_sset_count     = bnx2x_get_sset_count,
    .get_strings        = bnx2x_get_strings,
    .phys_id            = bnx2x_phys_id,
    .get_ethtool_stats  = bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
    u16 pmcsr;

    pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

    switch (state) {
    case PCI_D0:
        pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                              ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                               PCI_PM_CTRL_PME_STATUS));

        if (pmcsr & PCI_PM_CTRL_STATE_MASK)
            /* delay required during transition out of D3hot */
            msleep(20);
        break;

    case PCI_D3hot:
        pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
        pmcsr |= 3;
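        /* power-state field encoding per the PCI PM spec:
         * 0 = D0, 1 = D1, 2 = D2, 3 = D3hot */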

        if (bp->wol)
            pmcsr |= PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                              pmcsr);

        /* No more memory access after this point until
         * device is brought back to D0.
         */
        break;

    default:
        return -EINVAL;
    }
    return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
    u16 rx_cons_sb;

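    /* The last descriptor of each RCQ page appears to hold a pointer
     * to the next page rather than a completion, so a consumer index
     * that lands on it is advanced past it below.
     */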
    /* Tell compiler that status block fields can change */
    barrier();
    rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
    if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
        rx_cons_sb++;
    return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
    struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
                                             napi);
    struct bnx2x *bp = fp->bp;
    int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        goto poll_panic;
#endif

    prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
    prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

    bnx2x_update_fpsb_idx(fp);

    if (bnx2x_has_rx_work(fp)) {
        work_done = bnx2x_rx_int(fp, budget);

        /* must not complete if we consumed full budget */
        if (work_done >= budget)
            goto poll_again;
    }

    /* bnx2x_has_rx_work() reads the status block, thus we need to
     * ensure that the status block indices have actually been read
     * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
     * so that we won't write the "newer" value of the status block to IGU
     * (if there was a DMA right after bnx2x_has_rx_work and
     * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
     * may be postponed to right before bnx2x_ack_sb). In this case
     * there will never be another interrupt until there is another update
     * of the status block, while there is still unhandled work.
     */
    rmb();

    if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
        napi_complete(napi);

        bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
                     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
                     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
    }

poll_again:
    return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
                                   struct bnx2x_fastpath *fp,
                                   struct sw_tx_bd *tx_buf,
                                   struct eth_tx_start_bd **tx_bd, u16 hlen,
                                   u16 bd_prod, int nbd)
{
    struct eth_tx_start_bd *h_tx_bd = *tx_bd;
    struct eth_tx_bd *d_tx_bd;
    dma_addr_t mapping;
    int old_len = le16_to_cpu(h_tx_bd->nbytes);

    /* first fix first BD */
    h_tx_bd->nbd = cpu_to_le16(nbd);
    h_tx_bd->nbytes = cpu_to_le16(hlen);

    DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
       "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
       h_tx_bd->addr_lo, h_tx_bd->nbd);

    /* now get a new data BD
     * (after the pbd) and fill it */
    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
    d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

    mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
                       le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

    d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
    d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
    d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

    /* this marks the BD as one that has no individual mapping */
    tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

    DP(NETIF_MSG_TX_QUEUED,
       "TSO split data size is %d (%x:%x)\n",
       d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

    /* update tx_bd */
    *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

    return bd_prod;
}

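/* bnx2x_csum_fix() re-bases a partial checksum: 'fix' is the signed
 * byte distance between where the stack started checksumming and the
 * transport header, so the sum over those bytes is folded out
 * (fix > 0) or in (fix < 0), and the result is byte-swapped for the
 * parse BD.
 */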
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
    if (fix > 0)
        csum = (u16) ~csum_fold(csum_sub(csum,
                csum_partial(t_header - fix, fix, 0)));

    else if (fix < 0)
        csum = (u16) ~csum_fold(csum_add(csum,
                csum_partial(t_header, -fix, 0)));

    return swab16(csum);
}

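/* Classify an skb for the xmit path: XMIT_PLAIN when no checksum
 * offload is requested, XMIT_CSUM_V4/V6 (ORed with XMIT_CSUM_TCP for
 * TCP) for hardware checksumming, and XMIT_GSO_V4/V6 for TSO frames.
 */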
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
    u32 rc;

    if (skb->ip_summed != CHECKSUM_PARTIAL)
        rc = XMIT_PLAIN;

    else {
        if (skb->protocol == htons(ETH_P_IPV6)) {
            rc = XMIT_CSUM_V6;
            if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;

        } else {
            rc = XMIT_CSUM_V4;
            if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;
        }
    }

    if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
        rc |= XMIT_GSO_V4;

    else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
        rc |= XMIT_GSO_V6;

    return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if a packet requires linearization (i.e. it is too fragmented);
   there is no need to check fragmentation if the page size is > 8K
   (there will be no violation of the FW restrictions) */
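/* The idea behind the check: the FW can fetch at most MAX_FETCH_BD BDs
 * per packet, so an LSO packet is acceptable only if every window of
 * (MAX_FETCH_BD - 3) consecutive fragments (counting the linear part
 * first) covers at least one full MSS; the sliding-window sums below
 * verify exactly that.
 */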
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
                             u32 xmit_type)
{
    int to_copy = 0;
    int hlen = 0;
    int first_bd_sz = 0;

    /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
    if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

        if (xmit_type & XMIT_GSO) {
            unsigned short lso_mss = skb_shinfo(skb)->gso_size;
            /* Check if LSO packet needs to be copied:
               3 = 1 (for headers BD) + 2 (for PBD and last BD) */
            int wnd_size = MAX_FETCH_BD - 3;
            /* Number of windows to check */
            int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
            int wnd_idx = 0;
            int frag_idx = 0;
            u32 wnd_sum = 0;

            /* Headers length */
            hlen = (int)(skb_transport_header(skb) - skb->data) +
                   tcp_hdrlen(skb);

            /* Amount of data (w/o headers) on linear part of SKB */
            first_bd_sz = skb_headlen(skb) - hlen;

            wnd_sum = first_bd_sz;

            /* Calculate the first sum - it's special */
            for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
                wnd_sum +=
                    skb_shinfo(skb)->frags[frag_idx].size;

            /* If there was data on linear skb data - check it */
            if (first_bd_sz > 0) {
                if (unlikely(wnd_sum < lso_mss)) {
                    to_copy = 1;
                    goto exit_lbl;
                }

                wnd_sum -= first_bd_sz;
            }

            /* Others are easier: run through the frag list and
               check all windows */
            for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
                wnd_sum +=
                    skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

                if (unlikely(wnd_sum < lso_mss)) {
                    to_copy = 1;
                    break;
                }
                wnd_sum -=
                    skb_shinfo(skb)->frags[wnd_idx].size;
            }
        } else {
            /* a non-LSO packet that is too fragmented must always
               be linearized */
            to_copy = 1;
        }
    }

exit_lbl:
    if (unlikely(to_copy))
        DP(NETIF_MSG_TX_QUEUED,
           "Linearization IS REQUIRED for %s packet. "
           "num_frags %d hlen %d first_bd_sz %d\n",
           (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
           skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

    return to_copy;
}
#endif

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    struct bnx2x_fastpath *fp, *fp_stat;
    struct netdev_queue *txq;
    struct sw_tx_bd *tx_buf;
    struct eth_tx_start_bd *tx_start_bd;
    struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
    struct eth_tx_parse_bd *pbd = NULL;
    u16 pkt_prod, bd_prod;
    int nbd, fp_index;
    dma_addr_t mapping;
    u32 xmit_type = bnx2x_xmit_type(bp, skb);
    int i;
    u8 hlen = 0;
    __le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
    if (unlikely(bp->panic))
        return NETDEV_TX_BUSY;
#endif

    fp_index = skb_get_queue_mapping(skb);
    txq = netdev_get_tx_queue(dev, fp_index);

    fp = &bp->fp[fp_index + bp->num_rx_queues];
    fp_stat = &bp->fp[fp_index];

    if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
        fp_stat->eth_q_stats.driver_xoff++;
        netif_tx_stop_queue(txq);
        BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
        return NETDEV_TX_BUSY;
    }

    DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
       " gso type %x xmit_type %x\n",
       skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
       ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
    /* First, check if we need to linearize the skb (due to FW
       restrictions). No need to check fragmentation if page size > 8K
       (there will be no violation of the FW restrictions) */
    if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
        /* Statistics of linearization */
        bp->lin_cnt++;
        if (skb_linearize(skb) != 0) {
            DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
               "silently dropping this SKB\n");
            dev_kfree_skb_any(skb);
            return NETDEV_TX_OK;
        }
    }
#endif

    /*
    Please read carefully. First we use one BD which we mark as start,
    then we have a parsing info BD (used for TSO or xsum),
    and only then we have the rest of the TSO BDs.
    (don't forget to mark the last one as last,
    and to unmap only AFTER you write to the BD ...)
    And above all, all pbd sizes are in words - NOT DWORDS!
    */

    pkt_prod = fp->tx_pkt_prod++;
    bd_prod = TX_BD(fp->tx_bd_prod);

    /* get a tx_buf and first BD */
    tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
    tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

    tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
    tx_start_bd->general_data = (UNICAST_ADDRESS <<
                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
    /* header nbd */
    tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

    /* remember the first BD of the packet */
    tx_buf->first_bd = fp->tx_bd_prod;
    tx_buf->skb = skb;
    tx_buf->flags = 0;

    DP(NETIF_MSG_TX_QUEUED,
       "sending pkt %u @%p next_idx %u bd %u @%p\n",
       pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
    if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
        (bp->flags & HW_VLAN_TX_FLAG)) {
        tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
    } else
#endif
        tx_start_bd->vlan = cpu_to_le16(pkt_prod);

    /* turn on parsing and get a BD */
    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
    pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

    memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

    if (xmit_type & XMIT_CSUM) {
        hlen = (skb_network_header(skb) - skb->data) / 2;

        /* for now NS flag is not used in Linux */
        pbd->global_data =
            (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
                     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

        pbd->ip_hlen = (skb_transport_header(skb) -
                        skb_network_header(skb)) / 2;

        hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

        pbd->total_hlen = cpu_to_le16(hlen);
        hlen = hlen*2;

        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

        if (xmit_type & XMIT_CSUM_V4)
            tx_start_bd->bd_flags.as_bitfield |=
                ETH_TX_BD_FLAGS_IP_CSUM;
        else
            tx_start_bd->bd_flags.as_bitfield |=
                ETH_TX_BD_FLAGS_IPV6;

        if (xmit_type & XMIT_CSUM_TCP) {
            pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

        } else {
            s8 fix = SKB_CS_OFF(skb); /* signed! */

            pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

            DP(NETIF_MSG_TX_QUEUED,
               "hlen %d fix %d csum before fix %x\n",
               le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

            /* HW bug: fixup the CSUM */
            pbd->tcp_pseudo_csum =
                bnx2x_csum_fix(skb_transport_header(skb),
                               SKB_CS(skb), fix);

            DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
               pbd->tcp_pseudo_csum);
        }
    }

    mapping = pci_map_single(bp->pdev, skb->data,
                             skb_headlen(skb), PCI_DMA_TODEVICE);

    tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
    tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
    nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
    tx_start_bd->nbd = cpu_to_le16(nbd);
    tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
    pkt_size = tx_start_bd->nbytes;

    DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
       " nbytes %d flags %x vlan %x\n",
       tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
       le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
       tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

    if (xmit_type & XMIT_GSO) {

        DP(NETIF_MSG_TX_QUEUED,
           "TSO packet len %d hlen %d total len %d tso size %d\n",
           skb->len, hlen, skb_headlen(skb),
           skb_shinfo(skb)->gso_size);

        tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

        if (unlikely(skb_headlen(skb) > hlen))
            bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
                                     hlen, bd_prod, ++nbd);

        pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
        pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
        pbd->tcp_flags = pbd_tcp_flags(skb);

        if (xmit_type & XMIT_GSO_V4) {
            pbd->ip_id = swab16(ip_hdr(skb)->id);
            pbd->tcp_pseudo_csum =
                swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                          ip_hdr(skb)->daddr,
                                          0, IPPROTO_TCP, 0));

        } else
            pbd->tcp_pseudo_csum =
                swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                        &ipv6_hdr(skb)->daddr,
                                        0, IPPROTO_TCP, 0));

        pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
    }
    tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
        tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
        if (total_pkt_bd == NULL)
            total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

        mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                               frag->size, PCI_DMA_TODEVICE);

        tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
        tx_data_bd->nbytes = cpu_to_le16(frag->size);
        le16_add_cpu(&pkt_size, frag->size);

        DP(NETIF_MSG_TX_QUEUED,
           "frag %d bd @%p addr (%x:%x) nbytes %d\n",
           i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
           le16_to_cpu(tx_data_bd->nbytes));
    }

    DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

    bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

    /* now send a tx doorbell, counting the next BD
     * if the packet contains or ends with it
     */
    if (TX_BD_POFF(bd_prod) < nbd)
        nbd++;

    if (total_pkt_bd != NULL)
        total_pkt_bd->total_pkt_bytes = pkt_size;

    if (pbd)
        DP(NETIF_MSG_TX_QUEUED,
           "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
           " tcp_flags %x xsum %x seq %u hlen %u\n",
           pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
           pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
           pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

    DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

    /*
     * Make sure that the BD data is updated before updating the producer
     * since FW might read the BD right after the producer is updated.
     * This is only applicable for weak-ordered memory model archs such
     * as IA-64. The following barrier is also mandatory since FW
     * assumes packets must have BDs.
     */
    wmb();

    fp->tx_db.data.prod += nbd;
    barrier();
    DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

    mmiowb();

    fp->tx_bd_prod += nbd;

    if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
        netif_tx_stop_queue(txq);
        /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
           if we put Tx into XOFF state. */
        smp_mb();
        fp_stat->eth_q_stats.driver_xoff++;
        if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
            netif_tx_wake_queue(txq);
    }
    fp_stat->tx_pkt++;

    return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);

    netif_carrier_off(dev);

    bnx2x_set_power_state(bp, PCI_D0);

    return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);

    /* Unload the driver, release IRQs */
    bnx2x_nic_unload(bp, UNLOAD_CLOSE);
    if (atomic_read(&bp->pdev->enable_cnt) == 1)
        if (!CHIP_REV_IS_SLOW(bp))
            bnx2x_set_power_state(bp, PCI_D3hot);

    return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    u32 rx_mode = BNX2X_RX_MODE_NORMAL;
    int port = BP_PORT(bp);

    if (bp->state != BNX2X_STATE_OPEN) {
        DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
        return;
    }

    DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

    if (dev->flags & IFF_PROMISC)
        rx_mode = BNX2X_RX_MODE_PROMISC;

    else if ((dev->flags & IFF_ALLMULTI) ||
             ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
        rx_mode = BNX2X_RX_MODE_ALLMULTI;

    else { /* some multicasts */
        if (CHIP_IS_E1(bp)) {
            int i, old, offset;
            struct dev_mc_list *mclist;
            struct mac_configuration_cmd *config =
                bnx2x_sp(bp, mcast_config);

            for (i = 0, mclist = dev->mc_list;
                 mclist && (i < dev->mc_count);
                 i++, mclist = mclist->next) {

                config->config_table[i].cam_entry.msb_mac_addr =
                    swab16(*(u16 *)&mclist->dmi_addr[0]);
                config->config_table[i].cam_entry.middle_mac_addr =
                    swab16(*(u16 *)&mclist->dmi_addr[2]);
                config->config_table[i].cam_entry.lsb_mac_addr =
                    swab16(*(u16 *)&mclist->dmi_addr[4]);
                config->config_table[i].cam_entry.flags =
                    cpu_to_le16(port);
                config->config_table[i].target_table_entry.flags = 0;
                config->config_table[i].target_table_entry.
                    clients_bit_vector =
                    cpu_to_le32(1 << BP_L_ID(bp));
                config->config_table[i].target_table_entry.vlan_id = 0;

                DP(NETIF_MSG_IFUP,
                   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
                   config->config_table[i].cam_entry.msb_mac_addr,
                   config->config_table[i].cam_entry.middle_mac_addr,
                   config->config_table[i].cam_entry.lsb_mac_addr);
            }
            old = config->hdr.length;
            if (old > i) {
                for (; i < old; i++) {
                    if (CAM_IS_INVALID(config->config_table[i])) {
                        /* already invalidated */
                        break;
                    }
                    /* invalidate */
                    CAM_INVALIDATE(config->config_table[i]);
                }
            }

            if (CHIP_REV_IS_SLOW(bp))
                offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
            else
                offset = BNX2X_MAX_MULTICAST*(1 + port);

            config->hdr.length = i;
            config->hdr.offset = offset;
            config->hdr.client_id = bp->fp->cl_id;
            config->hdr.reserved1 = 0;

            bp->set_mac_pending++;
            smp_wmb();

            bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
                          U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
                          U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
                          0);
        } else { /* E1H */
            /* Accept one or more multicasts */
            struct dev_mc_list *mclist;
            u32 mc_filter[MC_HASH_SIZE];
            u32 crc, bit, regidx;
            int i;

            memset(mc_filter, 0, 4 * MC_HASH_SIZE);

            for (i = 0, mclist = dev->mc_list;
                 mclist && (i < dev->mc_count);
                 i++, mclist = mclist->next) {

                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   mclist->dmi_addr);

                crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
                bit = (crc >> 24) & 0xff;
                regidx = bit >> 5;
                bit &= 0x1f;
                mc_filter[regidx] |= (1 << bit);
            }
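            /* This builds a 256-bit hash filter: the top byte of the
             * CRC32c of each MAC selects one bit across the eight
             * 32-bit MC_HASH registers (regidx = bits 7..5 of that
             * byte, bit = bits 4..0).
             */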

            for (i = 0; i < MC_HASH_SIZE; i++)
                REG_WR(bp, MC_HASH_OFFSET(bp, i),
                       mc_filter[i]);
        }
    }

    bp->rx_mode = rx_mode;
    bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
    struct sockaddr *addr = p;
    struct bnx2x *bp = netdev_priv(dev);

    if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
        return -EINVAL;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
    if (netif_running(dev)) {
        if (CHIP_IS_E1(bp))
            bnx2x_set_eth_mac_addr_e1(bp, 1);
        else
            bnx2x_set_eth_mac_addr_e1h(bp, 1);
    }

    return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
                           int devad, u16 addr)
{
    struct bnx2x *bp = netdev_priv(netdev);
    u16 value;
    int rc;
    u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

    DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
       prtad, devad, addr);

    if (prtad != bp->mdio.prtad) {
        DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
           prtad, bp->mdio.prtad);
        return -EINVAL;
    }

    /* The HW expects different devad if CL22 is used */
    devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

    bnx2x_acquire_phy_lock(bp);
    rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
                         devad, addr, &value);
    bnx2x_release_phy_lock(bp);
    DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

    if (!rc)
        rc = value;
    return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
                            u16 addr, u16 value)
{
    struct bnx2x *bp = netdev_priv(netdev);
    u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
    int rc;

    DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
       " value 0x%x\n", prtad, devad, addr, value);

    if (prtad != bp->mdio.prtad) {
        DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
           prtad, bp->mdio.prtad);
        return -EINVAL;
    }

    /* The HW expects different devad if CL22 is used */
    devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

    bnx2x_acquire_phy_lock(bp);
    rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
                          devad, addr, value);
    bnx2x_release_phy_lock(bp);
    return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct bnx2x *bp = netdev_priv(dev);
    struct mii_ioctl_data *mdio = if_mii(ifr);

    DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
       mdio->phy_id, mdio->reg_num, mdio->val_in);

    if (!netif_running(dev))
        return -EAGAIN;

    return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
    struct bnx2x *bp = netdev_priv(dev);
    int rc = 0;

    if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
        ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
        return -EINVAL;

    /* This does not race with packet allocation
     * because the actual alloc size is
     * only updated as part of load
     */
    dev->mtu = new_mtu;

    if (netif_running(dev)) {
        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        rc = bnx2x_nic_load(bp, LOAD_NORMAL);
    }

    return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
    if (!bp->panic)
        bnx2x_panic();
#endif
    /* This allows the netif to be shutdown gracefully before resetting */
    schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
                                   struct vlan_group *vlgrp)
{
    struct bnx2x *bp = netdev_priv(dev);

    bp->vlgrp = vlgrp;

    /* Set flags according to the required capabilities */
    bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

    if (dev->features & NETIF_F_HW_VLAN_TX)
        bp->flags |= HW_VLAN_TX_FLAG;

    if (dev->features & NETIF_F_HW_VLAN_RX)
        bp->flags |= HW_VLAN_RX_FLAG;

    if (netif_running(dev))
        bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);

    disable_irq(bp->pdev->irq);
    bnx2x_interrupt(bp->pdev->irq, dev);
    enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
        .ndo_start_xmit         = bnx2x_start_xmit,
        .ndo_set_multicast_list = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
        .ndo_change_mtu         = bnx2x_change_mtu,
        .ndo_tx_timeout         = bnx2x_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
        .ndo_poll_controller    = poll_bnx2x,
#endif
};

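/*
 * One-time PCI/netdev setup for a newly probed device: enable the PCI
 * function, claim and map BAR0 (registers) and BAR2 (doorbells), configure
 * the DMA masks, and fill in the netdev ops, feature flags and MDIO
 * bookkeeping.
 */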
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
                                    struct net_device *dev)
{
        struct bnx2x *bp;
        int rc;

        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);

        bp->dev = dev;
        bp->pdev = pdev;
        bp->flags = 0;
        bp->func = PCI_FUNC(pdev->devfn);

        rc = pci_enable_device(pdev);
        if (rc) {
                printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
                goto err_out;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find PCI device base address,"
                       " aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "Cannot find second PCI device"
                       " base address, aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, DRV_MODULE_NAME);
                if (rc) {
                        printk(KERN_ERR PFX "Cannot obtain PCI resources,"
                               " aborting\n");
                        goto err_out_disable;
                }

                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (bp->pm_cap == 0) {
                printk(KERN_ERR PFX "Cannot find power management"
                       " capability, aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (bp->pcie_cap == 0) {
                printk(KERN_ERR PFX "Cannot find PCI Express capability,"
                       " aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
                bp->flags |= USING_DAC_FLAG;
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
                        printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
                               " failed, aborting\n");
                        rc = -EIO;
                        goto err_out_release;
                }

        } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
                printk(KERN_ERR PFX "System does not support DMA,"
                       " aborting\n");
                rc = -EIO;
                goto err_out_release;
        }

        dev->mem_start = pci_resource_start(pdev, 0);
        dev->base_addr = dev->mem_start;
        dev->mem_end = pci_resource_end(pdev, 0);

        dev->irq = pdev->irq;

        bp->regview = pci_ioremap_bar(pdev, 0);
        if (!bp->regview) {
                printk(KERN_ERR PFX "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err_out_release;
        }

        bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
                                        min_t(u64, BNX2X_DB_SIZE,
                                              pci_resource_len(pdev, 2)));
        if (!bp->doorbells) {
                printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
                rc = -ENOMEM;
                goto err_out_unmap;
        }

        bnx2x_set_power_state(bp, PCI_D0);

        /* clean indirect addresses */
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
        REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

        dev->watchdog_timeo = TX_TIMEOUT;

        dev->netdev_ops = &bnx2x_netdev_ops;
        dev->ethtool_ops = &bnx2x_ethtool_ops;
        dev->features |= NETIF_F_SG;
        dev->features |= NETIF_F_HW_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
        dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
        bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

        dev->vlan_features |= NETIF_F_SG;
        dev->vlan_features |= NETIF_F_HW_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->vlan_features |= NETIF_F_HIGHDMA;
        dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->vlan_features |= NETIF_F_TSO6;
#endif

        /* get_port_hwinfo() will set prtad and mmds properly */
        bp->mdio.prtad = MDIO_PRTAD_NONE;
        bp->mdio.mmds = 0;
        bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
        bp->mdio.dev = dev;
        bp->mdio.mdio_read = bnx2x_mdio_read;
        bp->mdio.mdio_write = bnx2x_mdio_write;

        return 0;

err_out_unmap:
        if (bp->regview) {
                iounmap(bp->regview);
                bp->regview = NULL;
        }
        if (bp->doorbells) {
                iounmap(bp->doorbells);
                bp->doorbells = NULL;
        }

err_out_release:
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

err_out_disable:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

err_out:
        return rc;
}

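/* Read the negotiated PCI-E link width and speed out of PCI config space */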
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
                                                 int *width, int *speed)
{
        u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

        *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

        /* return value of 1=2.5GHz 2=5GHz */
        *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

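/*
 * Sanity-check a firmware image before it is used: every section must lie
 * entirely within the file, every init_ops offset must be in range, and
 * the version embedded in the image must match the one this driver was
 * built against.
 */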
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
        const struct firmware *firmware = bp->firmware;
        struct bnx2x_fw_file_hdr *fw_hdr;
        struct bnx2x_fw_file_section *sections;
        u32 offset, len, num_ops;
        u16 *ops_offsets;
        int i;
        const u8 *fw_ver;

        if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
                return -EINVAL;

        fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
        sections = (struct bnx2x_fw_file_section *)fw_hdr;

        /* Make sure none of the offsets and sizes make us read beyond
         * the end of the firmware data */
        for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
                offset = be32_to_cpu(sections[i].offset);
                len = be32_to_cpu(sections[i].len);
                if (offset + len > firmware->size) {
                        printk(KERN_ERR PFX "Section %d length is out of "
                               "bounds\n", i);
                        return -EINVAL;
                }
        }

        /* Likewise for the init_ops offsets */
        offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
        ops_offsets = (u16 *)(firmware->data + offset);
        num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

        for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
                if (be16_to_cpu(ops_offsets[i]) > num_ops) {
                        printk(KERN_ERR PFX "Section offset %d is out of "
                               "bounds\n", i);
                        return -EINVAL;
                }
        }

        /* Check FW version */
        offset = be32_to_cpu(fw_hdr->fw_version.offset);
        fw_ver = firmware->data + offset;
        if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
            (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
            (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
            (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
                printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
                       " Should be %d.%d.%d.%d\n",
                       fw_ver[0], fw_ver[1], fw_ver[2],
                       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
                       BCM_5710_FW_MINOR_VERSION,
                       BCM_5710_FW_REVISION_VERSION,
                       BCM_5710_FW_ENGINEERING_VERSION);
                return -EINVAL;
        }

        return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
        const __be32 *source = (const __be32 *)_source;
        u32 *target = (u32 *)_target;
        u32 i;

        for (i = 0; i < n/4; i++)
                target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
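/*
 * Illustrative example (not taken from a firmware file): the 8 bytes
 * 02 01 02 03 de ad be ef decode to op = 0x02, offset = 0x010203 and
 * raw_data = 0xdeadbeef.
 */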
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
        const __be32 *source = (const __be32 *)_source;
        struct raw_op *target = (struct raw_op *)_target;
        u32 i, j, tmp;

        for (i = 0, j = 0; i < n/8; i++, j += 2) {
                tmp = be32_to_cpu(source[j]);
                target[i].op = (tmp >> 24) & 0xff;
                target[i].offset = tmp & 0xffffff;
                target[i].raw_data = be32_to_cpu(source[j+1]);
        }
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
        const __be16 *source = (const __be16 *)_source;
        u16 *target = (u16 *)_target;
        u32 i;

        for (i = 0; i < n/2; i++)
                target[i] = be16_to_cpu(source[i]);
}

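/*
 * Allocate bp->arr and fill it from the named firmware section, converting
 * endianness with 'func'; jumps to 'lbl' on allocation failure. Note the
 * implicit use of 'fw_hdr' and 'bp' from the calling scope.
 */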
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
        do { \
                u32 len = be32_to_cpu(fw_hdr->arr.len); \
                bp->arr = kmalloc(len, GFP_KERNEL); \
                if (!bp->arr) { \
                        printk(KERN_ERR PFX "Failed to allocate %d bytes " \
                               "for "#arr"\n", len); \
                        goto lbl; \
                } \
                func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
                     (u8 *)bp->arr, len); \
        } while (0)

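/*
 * Request the chip-specific firmware file (named from the prefix plus the
 * built-in version numbers, e.g. "bnx2x-e1-X.Y.Z.W.fw" for E1), validate
 * it and unpack the init data, opcodes and offsets into driver-owned
 * arrays.
 */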
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
        char fw_file_name[40] = {0};
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc, offset;

        /* Create a FW file name */
        if (CHIP_IS_E1(bp))
                offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
        else
                offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

        sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
                BCM_5710_FW_MAJOR_VERSION,
                BCM_5710_FW_MINOR_VERSION,
                BCM_5710_FW_REVISION_VERSION,
                BCM_5710_FW_ENGINEERING_VERSION);

        printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

        rc = request_firmware(&bp->firmware, fw_file_name, dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file %s\n",
                       fw_file_name);
                goto request_firmware_exit;
        }

        rc = bnx2x_check_firmware(bp);
        if (rc) {
                printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

        /* Initialize the pointers to the init arrays */
        /* Blob */
        BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

        /* Opcodes */
        BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

        /* Offsets */
        BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
                            be16_to_cpu_n);

        /* STORMs firmware */
        INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
        INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_pram_data.offset);
        INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_int_table_data.offset);
        INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_pram_data.offset);
        INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
        INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_pram_data.offset);
        INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_int_table_data.offset);
        INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_pram_data.offset);

        return 0;

init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}

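/*
 * PCI probe entry point: allocate the multi-queue netdev, run PCI and
 * driver-state init, load the firmware and register with the networking
 * core.
 */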
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                printk(KERN_ERR PFX "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msglevel = debug;

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}

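/*
 * Minimal unload path used from the PCI error (EEH) handlers: the chip may
 * no longer be accessible, so only software state, IRQs and host memory
 * are torn down here.
 */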
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                        bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

        printk(KERN_INFO "%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                printk(KERN_ERR PFX "Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}

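/*
 * Queue kwqes submitted by CNIC onto the driver's ring buffer; whatever
 * does not fit on the slow path queue right away stays pending until
 * bnx2x_cnic_sp_post() drains it.
 */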
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held.  */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

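/*
 * Tell CNIC which vectors and status blocks to use, depending on whether
 * the driver is running with MSI-X.
 */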
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

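/* Called by CNIC through cnic_eth_dev to attach itself to this device */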
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

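/*
 * Export point for the CNIC module: describe this device's CNIC-relevant
 * resources and callbacks in a cnic_eth_dev structure.
 */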
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */