/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

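/* Illustrative sketch (editorial example, not part of the upstream driver):
 * a typical read-modify-write through the indirect config window above,
 * for use before the BAR mapping is ready.  The helper name, 'addr' and
 * 'bits' are hypothetical placeholders.
 */
static inline void bnx2x_reg_ind_set_bits(struct bnx2x *bp, u32 addr, u32 bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);

	bnx2x_reg_wr_ind(bp, addr, val | bits);
}
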
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

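/* Illustrative sketch (editorial example, not part of the upstream driver):
 * posting a small host buffer into GRC space with bnx2x_write_dmae().  It
 * reuses the slowpath write-back scratch area the way real callers in this
 * file do; the helper name and 'grc_addr' are hypothetical.
 */
static inline void bnx2x_example_dmae_write(struct bnx2x *bp, u32 grc_addr)
{
	u32 *data = bnx2x_sp(bp, wb_data[0]);

	data[0] = 0x12345678;	/* arbitrary payload dwords */
	data[1] = 0x9abcdef0;

	/* length is in dwords; completion is polled inside the helper */
	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_addr, 2);
}
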
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

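/* Worked example (editorial comment only; the real DMAE_LEN32_WR_MAX value
 * lives in the headers): if the limit were 0x400 dwords, a request of
 * len == 0x900 would issue two full 0x400-dword commands at byte offsets
 * 0 and 0x1000, then a final 0x100-dword command at byte offset 0x2000.
 * 'len' counts dwords while 'offset' advances in bytes, hence the "* 4".
 */
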
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

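/* Quiescence ordering note (editorial comment, not driver code): the
 * sequence above is intr_sem++ -> smp_wmb() -> optional HW mask ->
 * synchronize_irq() -> flush of sp_task.  The handlers observe the raised
 * semaphore and bail out, so once synchronize_irq() returns, no ISR or
 * slowpath work can still be touching the device.
 */
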
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

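/* Illustrative usage (editorial comment only): the ISRs later in this file
 * disable further IGU interrupts for a status block with
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 * and re-enable them, acknowledging the last seen index, with
 *	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
 *		     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
 */
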
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

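/* Illustrative BD-chain walk (editorial comment; the packet shape is
 * hypothetical): a TSO packet with a split header and two page fragments
 * occupies a start BD, a parse BD, a split-header BD and two data BDs.
 * Only the start BD and the data BDs carry DMA mappings, which is why the
 * walk above unmaps the first BD, then skips the parse/split BDs before
 * unmapping the remaining fragments.
 */
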
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

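/* Worked example (editorial comment; the ring size is hypothetical): with
 * tx_ring_size == 4000, prod == 110 and cons == 10, 'used' becomes
 * 100 + NUM_TX_RINGS and the function reports 3900 - NUM_TX_RINGS free
 * BDs.  SUB_S16() keeps the difference correct even after the 16-bit
 * producer index wraps past the consumer.
 */
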
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

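/* Design note (editorial): recycling moves the same DMA mapping from the
 * consumer slot to the producer slot, so the only cache maintenance needed
 * is the sync-for-device above on the first RX_COPY_THRESH bytes the CPU
 * may have touched; no new mapping is created and no error check applies.
 */
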
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

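/* Illustrative layout (editorial comment; the counts are hypothetical):
 * if each SGE page held 512 entries, entries 510 and 511 of every page
 * would be "next-page" pointers rather than buffers.  The helper above
 * clears exactly those two bits per page so the producer-advance loop in
 * bnx2x_update_sge_prod() never waits on indices the FW cannot report.
 */
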
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

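/* Illustrative call site (editorial comment only): after refilling the
 * rings, a caller publishes all three producers at once, e.g. the way
 * bnx2x_rx_int() below does:
 *
 *	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 *			     fp->rx_sge_prod);
 *
 * The wmb() inside the helper is what makes the freshly written BDs/SGEs
 * globally visible before the FW sees the new producer values.
 */
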
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

1704static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1705{
1706 struct bnx2x_fastpath *fp = fp_cookie;
1707 struct bnx2x *bp = fp->bp;
a2fbb9ea 1708
da5a662a
VZ
1709 /* Return here if interrupt is disabled */
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712 return IRQ_HANDLED;
1713 }
1714
34f80b04 1715 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1716 fp->index, fp->sb_id);
0626b899 1717 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1718
1719#ifdef BNX2X_STOP_ON_ERROR
1720 if (unlikely(bp->panic))
1721 return IRQ_HANDLED;
1722#endif
ca00392c
EG
1723 /* Handle Rx or Tx according to MSI-X vector */
1724 if (fp->is_rx_queue) {
1725 prefetch(fp->rx_cons_sb);
1726 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1727
ca00392c 1728 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1729
ca00392c
EG
1730 } else {
1731 prefetch(fp->tx_cons_sb);
1732 prefetch(&fp->status_blk->c_status_block.status_block_index);
1733
1734 bnx2x_update_fpsb_idx(fp);
1735 rmb();
1736 bnx2x_tx_int(fp);
1737
1738 /* Re-enable interrupts */
1739 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1740 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1741 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1742 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1743 }
34f80b04 1744
a2fbb9ea
ET
1745 return IRQ_HANDLED;
1746}
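/* NOTE (illustrative, not part of the original driver): the IGU ack
 * sequence above follows a disable/re-arm pattern. A sketch of the Rx
 * flow, using only the calls that appear in this handler:
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 *	napi_schedule(...);	interrupts stay masked until the NAPI
 *				poll completes and re-enables them
 *
 * Tx-only vectors have no NAPI context, so the handler does the work
 * inline and acks with IGU_INT_ENABLE directly, as seen above.
 */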
1747
1748static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1749{
555f6c78 1750 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1751 u16 status = bnx2x_ack_int(bp);
34f80b04 1752 u16 mask;
ca00392c 1753 int i;
a2fbb9ea 1754
34f80b04 1755 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1756 if (unlikely(status == 0)) {
1757 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1758 return IRQ_NONE;
1759 }
f5372251 1760 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1761
34f80b04 1762 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1763 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1764 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1765 return IRQ_HANDLED;
1766 }
1767
3196a88a
EG
1768#ifdef BNX2X_STOP_ON_ERROR
1769 if (unlikely(bp->panic))
1770 return IRQ_HANDLED;
1771#endif
1772
ca00392c
EG
1773 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1774 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1775
ca00392c
EG
1776 mask = 0x2 << fp->sb_id;
1777 if (status & mask) {
1778 /* Handle Rx or Tx according to SB id */
1779 if (fp->is_rx_queue) {
1780 prefetch(fp->rx_cons_sb);
1781 prefetch(&fp->status_blk->u_status_block.
1782 status_block_index);
a2fbb9ea 1783
ca00392c 1784 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1785
ca00392c
EG
1786 } else {
1787 prefetch(fp->tx_cons_sb);
1788 prefetch(&fp->status_blk->c_status_block.
1789 status_block_index);
1790
1791 bnx2x_update_fpsb_idx(fp);
1792 rmb();
1793 bnx2x_tx_int(fp);
1794
1795 /* Re-enable interrupts */
1796 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1797 le16_to_cpu(fp->fp_u_idx),
1798 IGU_INT_NOP, 1);
1799 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1800 le16_to_cpu(fp->fp_c_idx),
1801 IGU_INT_ENABLE, 1);
1802 }
1803 status &= ~mask;
1804 }
a2fbb9ea
ET
1805 }
1806
a2fbb9ea 1807
34f80b04 1808 if (unlikely(status & 0x1)) {
1cf167f2 1809 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1810
1811 status &= ~0x1;
1812 if (!status)
1813 return IRQ_HANDLED;
1814 }
1815
34f80b04
EG
1816 if (status)
1817 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1818 status);
a2fbb9ea 1819
c18487ee 1820 return IRQ_HANDLED;
a2fbb9ea
ET
1821}
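/* NOTE (illustrative, not part of the original driver): the INTA status
 * word is a bit vector. Bit 0 maps to the slow path (hence the
 * "status & 0x1" check above), and each fastpath status block claims
 * the bit (0x2 << fp->sb_id). For example:
 *
 *	mask(sb_id 0) = 0x2 << 0 = 0x02
 *	mask(sb_id 1) = 0x2 << 1 = 0x04
 *
 * Any bits still set after the loop are reported as unknown.
 */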
1822
c18487ee 1823/* end of fast path */
a2fbb9ea 1824
bb2a0f7a 1825static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1826
c18487ee
YR
1827/* Link */
1828
1829/*
1830 * General service functions
1831 */
a2fbb9ea 1832
4a37fb66 1833static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1834{
1835 u32 lock_status;
1836 u32 resource_bit = (1 << resource);
4a37fb66
YG
1837 int func = BP_FUNC(bp);
1838 u32 hw_lock_control_reg;
c18487ee 1839 int cnt;
a2fbb9ea 1840
c18487ee
YR
1841 /* Validating that the resource is within range */
1842 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1843 DP(NETIF_MSG_HW,
1844 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1845 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1846 return -EINVAL;
1847 }
a2fbb9ea 1848
4a37fb66
YG
1849 if (func <= 5) {
1850 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1851 } else {
1852 hw_lock_control_reg =
1853 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1854 }
1855
c18487ee 1856 /* Validating that the resource is not already taken */
4a37fb66 1857 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1858 if (lock_status & resource_bit) {
1859 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1860 lock_status, resource_bit);
1861 return -EEXIST;
1862 }
a2fbb9ea 1863
46230476
EG
 1864 /* Try for 5 seconds, polling every 5ms */
1865 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1866 /* Try to acquire the lock */
4a37fb66
YG
1867 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1868 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1869 if (lock_status & resource_bit)
1870 return 0;
a2fbb9ea 1871
c18487ee 1872 msleep(5);
a2fbb9ea 1873 }
c18487ee
YR
1874 DP(NETIF_MSG_HW, "Timeout\n");
1875 return -EAGAIN;
1876}
a2fbb9ea 1877
4a37fb66 1878static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1879{
1880 u32 lock_status;
1881 u32 resource_bit = (1 << resource);
4a37fb66
YG
1882 int func = BP_FUNC(bp);
1883 u32 hw_lock_control_reg;
a2fbb9ea 1884
c18487ee
YR
1885 /* Validating that the resource is within range */
1886 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1887 DP(NETIF_MSG_HW,
1888 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1889 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1890 return -EINVAL;
1891 }
1892
4a37fb66
YG
1893 if (func <= 5) {
1894 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1895 } else {
1896 hw_lock_control_reg =
1897 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1898 }
1899
c18487ee 1900 /* Validating that the resource is currently taken */
4a37fb66 1901 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1902 if (!(lock_status & resource_bit)) {
1903 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1904 lock_status, resource_bit);
1905 return -EFAULT;
a2fbb9ea
ET
1906 }
1907
4a37fb66 1908 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1909 return 0;
1910}
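/* Illustrative usage sketch (not part of the original driver): callers
 * pair the two helpers around the shared-resource access, as the PHY
 * lock code below does for HW_LOCK_RESOURCE_MDIO:
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO) == 0) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *	}
 *
 * -EAGAIN from the acquire path means the 5 second poll timed out.
 */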
1911
1912/* HW Lock for shared dual port PHYs */
4a37fb66 1913static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1914{
34f80b04 1915 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1916
46c6a674
EG
1917 if (bp->port.need_hw_lock)
1918 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1919}
a2fbb9ea 1920
4a37fb66 1921static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1922{
46c6a674
EG
1923 if (bp->port.need_hw_lock)
1924 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1925
34f80b04 1926 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1927}
a2fbb9ea 1928
4acac6a5
EG
1929int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1930{
1931 /* The GPIO should be swapped if swap register is set and active */
1932 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1933 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1934 int gpio_shift = gpio_num +
1935 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1936 u32 gpio_mask = (1 << gpio_shift);
1937 u32 gpio_reg;
1938 int value;
1939
1940 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1941 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1942 return -EINVAL;
1943 }
1944
1945 /* read GPIO value */
1946 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1947
1948 /* get the requested pin value */
1949 if ((gpio_reg & gpio_mask) == gpio_mask)
1950 value = 1;
1951 else
1952 value = 0;
1953
1954 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1955
1956 return value;
1957}
1958
17de50b7 1959int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1960{
1961 /* The GPIO should be swapped if swap register is set and active */
1962 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1963 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1964 int gpio_shift = gpio_num +
1965 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1966 u32 gpio_mask = (1 << gpio_shift);
1967 u32 gpio_reg;
a2fbb9ea 1968
c18487ee
YR
1969 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971 return -EINVAL;
1972 }
a2fbb9ea 1973
4a37fb66 1974 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1975 /* read GPIO and mask except the float bits */
1976 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1977
c18487ee
YR
1978 switch (mode) {
1979 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1980 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1981 gpio_num, gpio_shift);
1982 /* clear FLOAT and set CLR */
1983 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1984 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1985 break;
a2fbb9ea 1986
c18487ee
YR
1987 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1988 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1989 gpio_num, gpio_shift);
1990 /* clear FLOAT and set SET */
1991 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1992 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1993 break;
a2fbb9ea 1994
17de50b7 1995 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1996 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1997 gpio_num, gpio_shift);
1998 /* set FLOAT */
1999 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2000 break;
a2fbb9ea 2001
c18487ee
YR
2002 default:
2003 break;
a2fbb9ea
ET
2004 }
2005
c18487ee 2006 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2007 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2008
c18487ee 2009 return 0;
a2fbb9ea
ET
2010}
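/* Illustrative usage (not part of the original driver): the fan failure
 * handler later in this file drives a PHY reset line low with exactly
 * this helper:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *
 * OUTPUT_LOW/OUTPUT_HIGH clear the FLOAT bit and set CLR/SET
 * respectively; INPUT_HI_Z sets FLOAT so the pin is released.
 */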
2011
4acac6a5
EG
2012int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2013{
2014 /* The GPIO should be swapped if swap register is set and active */
2015 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2016 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2017 int gpio_shift = gpio_num +
2018 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2019 u32 gpio_mask = (1 << gpio_shift);
2020 u32 gpio_reg;
2021
2022 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024 return -EINVAL;
2025 }
2026
2027 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2028 /* read GPIO int */
2029 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2030
2031 switch (mode) {
2032 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2033 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2034 "output low\n", gpio_num, gpio_shift);
2035 /* clear SET and set CLR */
2036 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2037 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2038 break;
2039
2040 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2041 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2042 "output high\n", gpio_num, gpio_shift);
2043 /* clear CLR and set SET */
2044 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2045 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2046 break;
2047
2048 default:
2049 break;
2050 }
2051
2052 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2053 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2054
2055 return 0;
2056}
2057
c18487ee 2058static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2059{
c18487ee
YR
2060 u32 spio_mask = (1 << spio_num);
2061 u32 spio_reg;
a2fbb9ea 2062
c18487ee
YR
2063 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2064 (spio_num > MISC_REGISTERS_SPIO_7)) {
2065 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2066 return -EINVAL;
a2fbb9ea
ET
2067 }
2068
4a37fb66 2069 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2070 /* read SPIO and mask except the float bits */
2071 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2072
c18487ee 2073 switch (mode) {
6378c025 2074 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2075 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2076 /* clear FLOAT and set CLR */
2077 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2078 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2079 break;
a2fbb9ea 2080
6378c025 2081 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2082 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2083 /* clear FLOAT and set SET */
2084 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2085 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2086 break;
a2fbb9ea 2087
c18487ee
YR
2088 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2089 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2090 /* set FLOAT */
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2092 break;
a2fbb9ea 2093
c18487ee
YR
2094 default:
2095 break;
a2fbb9ea
ET
2096 }
2097
c18487ee 2098 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2099 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2100
a2fbb9ea
ET
2101 return 0;
2102}
2103
c18487ee 2104static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2105{
ad33ea3a
EG
2106 switch (bp->link_vars.ieee_fc &
2107 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2108 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2109 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2110 ADVERTISED_Pause);
2111 break;
356e2385 2112
c18487ee 2113 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2114 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2115 ADVERTISED_Pause);
2116 break;
356e2385 2117
c18487ee 2118 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2119 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2120 break;
356e2385 2121
c18487ee 2122 default:
34f80b04 2123 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2124 ADVERTISED_Pause);
2125 break;
2126 }
2127}
f1410647 2128
c18487ee
YR
2129static void bnx2x_link_report(struct bnx2x *bp)
2130{
2691d51d
EG
2131 if (bp->state == BNX2X_STATE_DISABLED) {
2132 netif_carrier_off(bp->dev);
2133 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2134 return;
2135 }
2136
c18487ee
YR
2137 if (bp->link_vars.link_up) {
2138 if (bp->state == BNX2X_STATE_OPEN)
2139 netif_carrier_on(bp->dev);
2140 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2141
c18487ee 2142 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2143
c18487ee
YR
2144 if (bp->link_vars.duplex == DUPLEX_FULL)
2145 printk("full duplex");
2146 else
2147 printk("half duplex");
f1410647 2148
c0700f90
DM
2149 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2150 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2151 printk(", receive ");
356e2385
EG
2152 if (bp->link_vars.flow_ctrl &
2153 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2154 printk("& transmit ");
2155 } else {
2156 printk(", transmit ");
2157 }
2158 printk("flow control ON");
2159 }
2160 printk("\n");
f1410647 2161
c18487ee
YR
2162 } else { /* link_down */
2163 netif_carrier_off(bp->dev);
2164 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2165 }
c18487ee
YR
2166}
2167
b5bf9068 2168static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2169{
19680c48
EG
2170 if (!BP_NOMCP(bp)) {
2171 u8 rc;
a2fbb9ea 2172
19680c48 2173 /* Initialize link parameters structure variables */
8c99e7b0
YR
2174 /* It is recommended to turn off RX FC for jumbo frames
2175 for better performance */
0c593270 2176 if (bp->dev->mtu > 5000)
c0700f90 2177 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2178 else
c0700f90 2179 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2180
4a37fb66 2181 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2182
2183 if (load_mode == LOAD_DIAG)
2184 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2185
19680c48 2186 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2187
4a37fb66 2188 bnx2x_release_phy_lock(bp);
a2fbb9ea 2189
3c96c68b
EG
2190 bnx2x_calc_fc_adv(bp);
2191
b5bf9068
EG
2192 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2193 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2194 bnx2x_link_report(bp);
b5bf9068 2195 }
34f80b04 2196
19680c48
EG
2197 return rc;
2198 }
f5372251 2199 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2200 return -EINVAL;
a2fbb9ea
ET
2201}
2202
c18487ee 2203static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2204{
19680c48 2205 if (!BP_NOMCP(bp)) {
4a37fb66 2206 bnx2x_acquire_phy_lock(bp);
19680c48 2207 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2208 bnx2x_release_phy_lock(bp);
a2fbb9ea 2209
19680c48
EG
2210 bnx2x_calc_fc_adv(bp);
2211 } else
f5372251 2212 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2213}
a2fbb9ea 2214
c18487ee
YR
2215static void bnx2x__link_reset(struct bnx2x *bp)
2216{
19680c48 2217 if (!BP_NOMCP(bp)) {
4a37fb66 2218 bnx2x_acquire_phy_lock(bp);
589abe3a 2219 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2220 bnx2x_release_phy_lock(bp);
19680c48 2221 } else
f5372251 2222 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2223}
a2fbb9ea 2224
c18487ee
YR
2225static u8 bnx2x_link_test(struct bnx2x *bp)
2226{
2227 u8 rc;
a2fbb9ea 2228
4a37fb66 2229 bnx2x_acquire_phy_lock(bp);
c18487ee 2230 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2231 bnx2x_release_phy_lock(bp);
a2fbb9ea 2232
c18487ee
YR
2233 return rc;
2234}
a2fbb9ea 2235
8a1c38d1 2236static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2237{
8a1c38d1
EG
2238 u32 r_param = bp->link_vars.line_speed / 8;
2239 u32 fair_periodic_timeout_usec;
2240 u32 t_fair;
34f80b04 2241
8a1c38d1
EG
2242 memset(&(bp->cmng.rs_vars), 0,
2243 sizeof(struct rate_shaping_vars_per_port));
2244 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2245
8a1c38d1
EG
2246 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2247 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2248
8a1c38d1
EG
 2249 /* this is the threshold below which no timer arming will occur.
 2250 The 1.25 coefficient makes the threshold a little bigger than
 2251 the real time, to compensate for timer inaccuracy */
2252 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2253 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2254
8a1c38d1
EG
2255 /* resolution of fairness timer */
2256 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2257 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2258 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2259
8a1c38d1
EG
2260 /* this is the threshold below which we won't arm the timer anymore */
2261 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2262
8a1c38d1
EG
 2263 /* we multiply by 1e3/8 to get bytes/msec.
 2264 We don't want the credit to exceed
 2265 t_fair*FAIR_MEM (the algorithm resolution) */
2266 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2267 /* since each tick is 4 usec */
2268 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2269}
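/* Worked example (illustrative, not part of the original driver), for a
 * 10G link, i.e. line_speed = 10000:
 *
 *	r_param      = 10000 / 8 = 1250 bytes/usec
 *	rs_threshold = (RS_PERIODIC_TIMEOUT_USEC * 1250 * 5) / 4,
 *		       i.e. 1.25 * (bytes sent in one timeout period)
 *	t_fair       = T_FAIR_COEF / 10000 = 1000 usec (and 10000 usec
 *		       at 1G, matching the comment above)
 *
 * The final /4 conversions reflect the 4 usec SDM tick noted above.
 */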
2270
2691d51d
EG
2271/* Calculates the sum of vn_min_rates.
2272 It's needed for further normalizing of the min_rates.
2273 Returns:
2274 sum of vn_min_rates.
2275 or
2276 0 - if all the min_rates are 0.
 2277 In the latter case the fairness algorithm should be deactivated.
2278 If not all min_rates are zero then those that are zeroes will be set to 1.
2279 */
2280static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2281{
2282 int all_zero = 1;
2283 int port = BP_PORT(bp);
2284 int vn;
2285
2286 bp->vn_weight_sum = 0;
2287 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2288 int func = 2*vn + port;
2289 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2292
2293 /* Skip hidden vns */
2294 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2295 continue;
2296
2297 /* If min rate is zero - set it to 1 */
2298 if (!vn_min_rate)
2299 vn_min_rate = DEF_MIN_RATE;
2300 else
2301 all_zero = 0;
2302
2303 bp->vn_weight_sum += vn_min_rate;
2304 }
2305
2306 /* ... only if all min rates are zeros - disable fairness */
2307 if (all_zero)
2308 bp->vn_weight_sum = 0;
2309}
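/* Illustrative example (not part of the original driver): with four vns
 * whose min rates come out as {0, a, 0, b} after the *100 scaling
 * above, the zero entries are bumped to DEF_MIN_RATE first, so
 * vn_weight_sum = a + b + 2*DEF_MIN_RATE. Only when every non-hidden
 * vn is zero does all_zero survive the loop and vn_weight_sum get
 * forced back to 0, disabling fairness.
 */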
2310
8a1c38d1 2311static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2312{
2313 struct rate_shaping_vars_per_vn m_rs_vn;
2314 struct fairness_vars_per_vn m_fair_vn;
2315 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2316 u16 vn_min_rate, vn_max_rate;
2317 int i;
2318
2319 /* If function is hidden - set min and max to zeroes */
2320 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2321 vn_min_rate = 0;
2322 vn_max_rate = 0;
2323
2324 } else {
2325 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2326 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2327 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2328 if current min rate is zero - set it to 1.
33471629 2329 This is a requirement of the algorithm. */
8a1c38d1 2330 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2331 vn_min_rate = DEF_MIN_RATE;
2332 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2333 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2334 }
2335
8a1c38d1
EG
2336 DP(NETIF_MSG_IFUP,
2337 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2338 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2339
2340 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2341 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2342
2343 /* global vn counter - maximal Mbps for this vn */
2344 m_rs_vn.vn_counter.rate = vn_max_rate;
2345
2346 /* quota - number of bytes transmitted in this period */
2347 m_rs_vn.vn_counter.quota =
2348 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2349
8a1c38d1 2350 if (bp->vn_weight_sum) {
34f80b04
EG
2351 /* credit for each period of the fairness algorithm:
2352 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2353 vn_weight_sum should not be larger than 10000, thus
2354 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2355 than zero */
34f80b04 2356 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2357 max((u32)(vn_min_rate * (T_FAIR_COEF /
2358 (8 * bp->vn_weight_sum))),
2359 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2360 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2361 m_fair_vn.vn_credit_delta);
2362 }
2363
34f80b04
EG
2364 /* Store it to internal memory */
2365 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2366 REG_WR(bp, BAR_XSTRORM_INTMEM +
2367 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2368 ((u32 *)(&m_rs_vn))[i]);
2369
2370 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2371 REG_WR(bp, BAR_XSTRORM_INTMEM +
2372 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2373 ((u32 *)(&m_fair_vn))[i]);
2374}
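/* Illustrative arithmetic (not part of the original driver): the quota
 * is the byte budget for one rate-shaping period, e.g. for
 * vn_max_rate = 10000 (in the Mbps units computed above):
 *
 *	quota = (10000 * RS_PERIODIC_TIMEOUT_USEC) / 8 bytes
 *
 * and vn_credit_delta is the larger of the vn's share of T_FAIR_COEF/8
 * and twice fair_threshold, which keeps the per-period credit strictly
 * positive as the comment above notes.
 */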
2375
8a1c38d1 2376
c18487ee
YR
2377/* This function is called upon link interrupt */
2378static void bnx2x_link_attn(struct bnx2x *bp)
2379{
bb2a0f7a
YG
2380 /* Make sure that we are synced with the current statistics */
2381 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2382
c18487ee 2383 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2384
bb2a0f7a
YG
2385 if (bp->link_vars.link_up) {
2386
1c06328c 2387 /* dropless flow control */
a18f5128 2388 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2389 int port = BP_PORT(bp);
2390 u32 pause_enabled = 0;
2391
2392 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2393 pause_enabled = 1;
2394
2395 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2396 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2397 pause_enabled);
2398 }
2399
bb2a0f7a
YG
2400 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2401 struct host_port_stats *pstats;
2402
2403 pstats = bnx2x_sp(bp, port_stats);
2404 /* reset old bmac stats */
2405 memset(&(pstats->mac_stx[0]), 0,
2406 sizeof(struct mac_stx));
2407 }
2408 if ((bp->state == BNX2X_STATE_OPEN) ||
2409 (bp->state == BNX2X_STATE_DISABLED))
2410 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2411 }
2412
c18487ee
YR
2413 /* indicate link status */
2414 bnx2x_link_report(bp);
34f80b04
EG
2415
2416 if (IS_E1HMF(bp)) {
8a1c38d1 2417 int port = BP_PORT(bp);
34f80b04 2418 int func;
8a1c38d1 2419 int vn;
34f80b04 2420
ab6ad5a4 2421 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2422 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2423 if (vn == BP_E1HVN(bp))
2424 continue;
2425
8a1c38d1 2426 func = ((vn << 1) | port);
34f80b04
EG
2427 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2428 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2429 }
34f80b04 2430
8a1c38d1
EG
2431 if (bp->link_vars.link_up) {
2432 int i;
2433
2434 /* Init rate shaping and fairness contexts */
2435 bnx2x_init_port_minmax(bp);
34f80b04 2436
34f80b04 2437 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2438 bnx2x_init_vn_minmax(bp, 2*vn + port);
2439
2440 /* Store it to internal memory */
2441 for (i = 0;
2442 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2443 REG_WR(bp, BAR_XSTRORM_INTMEM +
2444 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2445 ((u32 *)(&bp->cmng))[i]);
2446 }
34f80b04 2447 }
c18487ee 2448}
a2fbb9ea 2449
c18487ee
YR
2450static void bnx2x__link_status_update(struct bnx2x *bp)
2451{
2691d51d
EG
2452 int func = BP_FUNC(bp);
2453
c18487ee
YR
2454 if (bp->state != BNX2X_STATE_OPEN)
2455 return;
a2fbb9ea 2456
c18487ee 2457 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2458
bb2a0f7a
YG
2459 if (bp->link_vars.link_up)
2460 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2461 else
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463
2691d51d
EG
2464 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2465 bnx2x_calc_vn_weight_sum(bp);
2466
c18487ee
YR
2467 /* indicate link status */
2468 bnx2x_link_report(bp);
a2fbb9ea 2469}
a2fbb9ea 2470
34f80b04
EG
2471static void bnx2x_pmf_update(struct bnx2x *bp)
2472{
2473 int port = BP_PORT(bp);
2474 u32 val;
2475
2476 bp->port.pmf = 1;
2477 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2478
2479 /* enable nig attention */
2480 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2481 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2482 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2483
2484 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2485}
2486
c18487ee 2487/* end of Link */
a2fbb9ea
ET
2488
2489/* slow path */
2490
2491/*
2492 * General service functions
2493 */
2494
2691d51d
EG
2495/* send the MCP a request, block until there is a reply */
2496u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2497{
2498 int func = BP_FUNC(bp);
2499 u32 seq = ++bp->fw_seq;
2500 u32 rc = 0;
2501 u32 cnt = 1;
2502 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2503
2504 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2505 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2506
2507 do {
 2508 /* let the FW do its magic ... */
2509 msleep(delay);
2510
2511 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2512
 2513 /* Give the FW up to 2 seconds (200*10ms) */
2514 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2515
2516 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2517 cnt*delay, rc, seq);
2518
2519 /* is this a reply to our command? */
2520 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2521 rc &= FW_MSG_CODE_MASK;
2522 else {
2523 /* FW BUG! */
2524 BNX2X_ERR("FW failed to respond!\n");
2525 bnx2x_fw_dump(bp);
2526 rc = 0;
2527 }
2528
2529 return rc;
2530}
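/* Illustrative usage (not part of the original driver): callers treat a
 * 0 return as "no/invalid reply" and otherwise switch on the masked
 * response code; the DCC handler below simply reports back with
 *
 *	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * The sequence number in the low bits is what matches a reply read from
 * fw_mb_header against the request written to drv_mb_header.
 */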
2531
2532static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2533static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2534static void bnx2x_set_rx_mode(struct net_device *dev);
2535
2536static void bnx2x_e1h_disable(struct bnx2x *bp)
2537{
2538 int port = BP_PORT(bp);
2539 int i;
2540
2541 bp->rx_mode = BNX2X_RX_MODE_NONE;
2542 bnx2x_set_storm_rx_mode(bp);
2543
2544 netif_tx_disable(bp->dev);
2545 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2546
2547 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2548
2549 bnx2x_set_mac_addr_e1h(bp, 0);
2550
2551 for (i = 0; i < MC_HASH_SIZE; i++)
2552 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2553
2554 netif_carrier_off(bp->dev);
2555}
2556
2557static void bnx2x_e1h_enable(struct bnx2x *bp)
2558{
2559 int port = BP_PORT(bp);
2560
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2562
2563 bnx2x_set_mac_addr_e1h(bp, 1);
2564
 2565 /* Tx queues only need to be re-enabled */
2566 netif_tx_wake_all_queues(bp->dev);
2567
2568 /* Initialize the receive filter. */
2569 bnx2x_set_rx_mode(bp->dev);
2570}
2571
2572static void bnx2x_update_min_max(struct bnx2x *bp)
2573{
2574 int port = BP_PORT(bp);
2575 int vn, i;
2576
2577 /* Init rate shaping and fairness contexts */
2578 bnx2x_init_port_minmax(bp);
2579
2580 bnx2x_calc_vn_weight_sum(bp);
2581
2582 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2583 bnx2x_init_vn_minmax(bp, 2*vn + port);
2584
2585 if (bp->port.pmf) {
2586 int func;
2587
2588 /* Set the attention towards other drivers on the same port */
2589 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2590 if (vn == BP_E1HVN(bp))
2591 continue;
2592
2593 func = ((vn << 1) | port);
2594 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2595 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2596 }
2597
2598 /* Store it to internal memory */
2599 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2600 REG_WR(bp, BAR_XSTRORM_INTMEM +
2601 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2602 ((u32 *)(&bp->cmng))[i]);
2603 }
2604}
2605
2606static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2607{
2608 int func = BP_FUNC(bp);
2609
2610 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2611 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2612
2613 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2614
2615 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2616 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2617 bp->state = BNX2X_STATE_DISABLED;
2618
2619 bnx2x_e1h_disable(bp);
2620 } else {
2621 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2622 bp->state = BNX2X_STATE_OPEN;
2623
2624 bnx2x_e1h_enable(bp);
2625 }
2626 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2627 }
2628 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2629
2630 bnx2x_update_min_max(bp);
2631 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2632 }
2633
2634 /* Report results to MCP */
2635 if (dcc_event)
2636 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2637 else
2638 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2639}
2640
28912902
MC
2641/* must be called under the spq lock */
2642static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2643{
2644 struct eth_spe *next_spe = bp->spq_prod_bd;
2645
2646 if (bp->spq_prod_bd == bp->spq_last_bd) {
2647 bp->spq_prod_bd = bp->spq;
2648 bp->spq_prod_idx = 0;
2649 DP(NETIF_MSG_TIMER, "end of spq\n");
2650 } else {
2651 bp->spq_prod_bd++;
2652 bp->spq_prod_idx++;
2653 }
2654 return next_spe;
2655}
2656
2657/* must be called under the spq lock */
2658static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2659{
2660 int func = BP_FUNC(bp);
2661
2662 /* Make sure that BD data is updated before writing the producer */
2663 wmb();
2664
2665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2666 bp->spq_prod_idx);
2667 mmiowb();
2668}
2669
a2fbb9ea
ET
2670/* the slow path queue is odd since completions arrive on the fastpath ring */
2671static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2672 u32 data_hi, u32 data_lo, int common)
2673{
28912902 2674 struct eth_spe *spe;
a2fbb9ea 2675
34f80b04
EG
2676 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2677 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2678 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2679 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2680 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2681
2682#ifdef BNX2X_STOP_ON_ERROR
2683 if (unlikely(bp->panic))
2684 return -EIO;
2685#endif
2686
34f80b04 2687 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2688
2689 if (!bp->spq_left) {
2690 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2691 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2692 bnx2x_panic();
2693 return -EBUSY;
2694 }
f1410647 2695
28912902
MC
2696 spe = bnx2x_sp_get_next(bp);
2697
a2fbb9ea 2698 /* CID needs the port number to be encoded in it */
28912902 2699 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2700 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2701 HW_CID(bp, cid)));
28912902 2702 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2703 if (common)
28912902 2704 spe->hdr.type |=
a2fbb9ea
ET
2705 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2706
28912902
MC
2707 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2708 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2709
2710 bp->spq_left--;
2711
28912902 2712 bnx2x_sp_prod_update(bp);
34f80b04 2713 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2714 return 0;
2715}
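/* Illustrative usage (not part of the original driver): the statistics
 * code later in this file posts a ramrod through this helper and checks
 * the return value:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * 0 means the SPQE was queued and the producer updated; -EBUSY means
 * the ring was full (which also triggers a panic above).
 */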
2716
2717/* acquire split MCP access lock register */
4a37fb66 2718static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2719{
a2fbb9ea 2720 u32 i, j, val;
34f80b04 2721 int rc = 0;
a2fbb9ea
ET
2722
2723 might_sleep();
2724 i = 100;
2725 for (j = 0; j < i*10; j++) {
2726 val = (1UL << 31);
2727 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2728 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2729 if (val & (1L << 31))
2730 break;
2731
2732 msleep(5);
2733 }
a2fbb9ea 2734 if (!(val & (1L << 31))) {
19680c48 2735 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2736 rc = -EBUSY;
2737 }
2738
2739 return rc;
2740}
2741
4a37fb66
YG
2742/* release split MCP access lock register */
2743static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2744{
2745 u32 val = 0;
2746
2747 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2748}
2749
2750static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2751{
2752 struct host_def_status_block *def_sb = bp->def_status_blk;
2753 u16 rc = 0;
2754
2755 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2756 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2757 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2758 rc |= 1;
2759 }
2760 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2761 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2762 rc |= 2;
2763 }
2764 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2765 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2766 rc |= 4;
2767 }
2768 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2769 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2770 rc |= 8;
2771 }
2772 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2773 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2774 rc |= 16;
2775 }
2776 return rc;
2777}
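/* NOTE (illustrative, not part of the original driver): the return
 * value is a change bitmap over the default status block:
 *
 *	bit 0 (0x01) - attention bits index changed
 *	bit 1 (0x02) - CSTORM index changed
 *	bit 2 (0x04) - USTORM index changed
 *	bit 3 (0x08) - XSTORM index changed
 *	bit 4 (0x10) - TSTORM index changed
 *
 * bnx2x_sp_task() below only branches on bit 0 (HW attentions) and
 * acks all five indices regardless.
 */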
2778
2779/*
2780 * slow path service functions
2781 */
2782
2783static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2784{
34f80b04 2785 int port = BP_PORT(bp);
5c862848
EG
2786 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2787 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2788 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2789 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2790 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2791 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2792 u32 aeu_mask;
87942b46 2793 u32 nig_mask = 0;
a2fbb9ea 2794
a2fbb9ea
ET
2795 if (bp->attn_state & asserted)
2796 BNX2X_ERR("IGU ERROR\n");
2797
3fcaf2e5
EG
2798 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2799 aeu_mask = REG_RD(bp, aeu_addr);
2800
a2fbb9ea 2801 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2802 aeu_mask, asserted);
2803 aeu_mask &= ~(asserted & 0xff);
2804 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2805
3fcaf2e5
EG
2806 REG_WR(bp, aeu_addr, aeu_mask);
2807 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2808
3fcaf2e5 2809 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2810 bp->attn_state |= asserted;
3fcaf2e5 2811 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2812
2813 if (asserted & ATTN_HARD_WIRED_MASK) {
2814 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2815
a5e9a7cf
EG
2816 bnx2x_acquire_phy_lock(bp);
2817
877e9aa4 2818 /* save nig interrupt mask */
87942b46 2819 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2820 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2821
c18487ee 2822 bnx2x_link_attn(bp);
a2fbb9ea
ET
2823
2824 /* handle unicore attn? */
2825 }
2826 if (asserted & ATTN_SW_TIMER_4_FUNC)
2827 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2828
2829 if (asserted & GPIO_2_FUNC)
2830 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2831
2832 if (asserted & GPIO_3_FUNC)
2833 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2834
2835 if (asserted & GPIO_4_FUNC)
2836 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2837
2838 if (port == 0) {
2839 if (asserted & ATTN_GENERAL_ATTN_1) {
2840 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2841 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2842 }
2843 if (asserted & ATTN_GENERAL_ATTN_2) {
2844 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2845 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2846 }
2847 if (asserted & ATTN_GENERAL_ATTN_3) {
2848 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2849 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2850 }
2851 } else {
2852 if (asserted & ATTN_GENERAL_ATTN_4) {
2853 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2854 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2855 }
2856 if (asserted & ATTN_GENERAL_ATTN_5) {
2857 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2858 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2859 }
2860 if (asserted & ATTN_GENERAL_ATTN_6) {
2861 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2862 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2863 }
2864 }
2865
2866 } /* if hardwired */
2867
5c862848
EG
2868 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2869 asserted, hc_addr);
2870 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2871
2872 /* now set back the mask */
a5e9a7cf 2873 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2874 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2875 bnx2x_release_phy_lock(bp);
2876 }
a2fbb9ea
ET
2877}
2878
fd4ef40d
EG
2879static inline void bnx2x_fan_failure(struct bnx2x *bp)
2880{
2881 int port = BP_PORT(bp);
2882
2883 /* mark the failure */
2884 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2885 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2886 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2887 bp->link_params.ext_phy_config);
2888
2889 /* log the failure */
2890 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2891 " the driver to shutdown the card to prevent permanent"
2892 " damage. Please contact Dell Support for assistance\n",
2893 bp->dev->name);
2894}
ab6ad5a4 2895
877e9aa4 2896static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2897{
34f80b04 2898 int port = BP_PORT(bp);
877e9aa4 2899 int reg_offset;
4d295db0 2900 u32 val, swap_val, swap_override;
877e9aa4 2901
34f80b04
EG
2902 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2903 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2904
34f80b04 2905 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2906
2907 val = REG_RD(bp, reg_offset);
2908 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2909 REG_WR(bp, reg_offset, val);
2910
2911 BNX2X_ERR("SPIO5 hw attention\n");
2912
fd4ef40d 2913 /* Fan failure attention */
35b19ba5
EG
2914 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2916 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2917 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2918 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2919 /* The PHY reset is controlled by GPIO 1 */
2920 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2921 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2922 break;
2923
4d295db0
EG
2924 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2925 /* The PHY reset is controlled by GPIO 1 */
2926 /* fake the port number to cancel the swap done in
2927 set_gpio() */
2928 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2929 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2930 port = (swap_val && swap_override) ^ 1;
2931 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933 break;
2934
877e9aa4
ET
2935 default:
2936 break;
2937 }
fd4ef40d 2938 bnx2x_fan_failure(bp);
877e9aa4 2939 }
34f80b04 2940
589abe3a
EG
2941 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2942 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2943 bnx2x_acquire_phy_lock(bp);
2944 bnx2x_handle_module_detect_int(&bp->link_params);
2945 bnx2x_release_phy_lock(bp);
2946 }
2947
34f80b04
EG
2948 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2949
2950 val = REG_RD(bp, reg_offset);
2951 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2952 REG_WR(bp, reg_offset, val);
2953
2954 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2955 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2956 bnx2x_panic();
2957 }
877e9aa4
ET
2958}
2959
2960static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2961{
2962 u32 val;
2963
0626b899 2964 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2965
2966 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2967 BNX2X_ERR("DB hw attention 0x%x\n", val);
2968 /* DORQ discard attention */
2969 if (val & 0x2)
2970 BNX2X_ERR("FATAL error from DORQ\n");
2971 }
34f80b04
EG
2972
2973 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2974
2975 int port = BP_PORT(bp);
2976 int reg_offset;
2977
2978 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2979 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2980
2981 val = REG_RD(bp, reg_offset);
2982 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2983 REG_WR(bp, reg_offset, val);
2984
2985 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2986 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
2987 bnx2x_panic();
2988 }
877e9aa4
ET
2989}
2990
2991static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2992{
2993 u32 val;
2994
2995 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2996
2997 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2998 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2999 /* CFC error attention */
3000 if (val & 0x2)
3001 BNX2X_ERR("FATAL error from CFC\n");
3002 }
3003
3004 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3005
3006 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3007 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3008 /* RQ_USDMDP_FIFO_OVERFLOW */
3009 if (val & 0x18000)
3010 BNX2X_ERR("FATAL error from PXP\n");
3011 }
34f80b04
EG
3012
3013 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3014
3015 int port = BP_PORT(bp);
3016 int reg_offset;
3017
3018 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3019 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3020
3021 val = REG_RD(bp, reg_offset);
3022 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3023 REG_WR(bp, reg_offset, val);
3024
3025 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3026 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3027 bnx2x_panic();
3028 }
877e9aa4
ET
3029}
3030
3031static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3032{
34f80b04
EG
3033 u32 val;
3034
877e9aa4
ET
3035 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3036
34f80b04
EG
3037 if (attn & BNX2X_PMF_LINK_ASSERT) {
3038 int func = BP_FUNC(bp);
3039
3040 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3041 val = SHMEM_RD(bp, func_mb[func].drv_status);
3042 if (val & DRV_STATUS_DCC_EVENT_MASK)
3043 bnx2x_dcc_event(bp,
3044 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3045 bnx2x__link_status_update(bp);
2691d51d 3046 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3047 bnx2x_pmf_update(bp);
3048
3049 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3050
3051 BNX2X_ERR("MC assert!\n");
3052 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3053 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3054 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3055 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3056 bnx2x_panic();
3057
3058 } else if (attn & BNX2X_MCP_ASSERT) {
3059
3060 BNX2X_ERR("MCP assert!\n");
3061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3062 bnx2x_fw_dump(bp);
877e9aa4
ET
3063
3064 } else
3065 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3066 }
3067
3068 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3069 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3070 if (attn & BNX2X_GRC_TIMEOUT) {
3071 val = CHIP_IS_E1H(bp) ?
3072 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3073 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3074 }
3075 if (attn & BNX2X_GRC_RSV) {
3076 val = CHIP_IS_E1H(bp) ?
3077 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3078 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3079 }
877e9aa4 3080 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3081 }
3082}
3083
3084static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3085{
a2fbb9ea
ET
3086 struct attn_route attn;
3087 struct attn_route group_mask;
34f80b04 3088 int port = BP_PORT(bp);
877e9aa4 3089 int index;
a2fbb9ea
ET
3090 u32 reg_addr;
3091 u32 val;
3fcaf2e5 3092 u32 aeu_mask;
a2fbb9ea
ET
3093
 3094 /* need to take HW lock because the MCP or the other port might also
 3095 try to handle this event */
4a37fb66 3096 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3097
3098 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3099 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3100 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3101 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3102 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3103 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3104
3105 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3106 if (deasserted & (1 << index)) {
3107 group_mask = bp->attn_group[index];
3108
34f80b04
EG
3109 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3110 index, group_mask.sig[0], group_mask.sig[1],
3111 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3112
877e9aa4
ET
3113 bnx2x_attn_int_deasserted3(bp,
3114 attn.sig[3] & group_mask.sig[3]);
3115 bnx2x_attn_int_deasserted1(bp,
3116 attn.sig[1] & group_mask.sig[1]);
3117 bnx2x_attn_int_deasserted2(bp,
3118 attn.sig[2] & group_mask.sig[2]);
3119 bnx2x_attn_int_deasserted0(bp,
3120 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3121
a2fbb9ea
ET
3122 if ((attn.sig[0] & group_mask.sig[0] &
3123 HW_PRTY_ASSERT_SET_0) ||
3124 (attn.sig[1] & group_mask.sig[1] &
3125 HW_PRTY_ASSERT_SET_1) ||
3126 (attn.sig[2] & group_mask.sig[2] &
3127 HW_PRTY_ASSERT_SET_2))
6378c025 3128 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3129 }
3130 }
3131
4a37fb66 3132 bnx2x_release_alr(bp);
a2fbb9ea 3133
5c862848 3134 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3135
3136 val = ~deasserted;
3fcaf2e5
EG
3137 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3138 val, reg_addr);
5c862848 3139 REG_WR(bp, reg_addr, val);
a2fbb9ea 3140
a2fbb9ea 3141 if (~bp->attn_state & deasserted)
3fcaf2e5 3142 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3143
3144 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3145 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3146
3fcaf2e5
EG
3147 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3148 aeu_mask = REG_RD(bp, reg_addr);
3149
3150 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3151 aeu_mask, deasserted);
3152 aeu_mask |= (deasserted & 0xff);
3153 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3154
3fcaf2e5
EG
3155 REG_WR(bp, reg_addr, aeu_mask);
3156 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3157
3158 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3159 bp->attn_state &= ~deasserted;
3160 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3161}
3162
3163static void bnx2x_attn_int(struct bnx2x *bp)
3164{
3165 /* read local copy of bits */
68d59484
EG
3166 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3167 attn_bits);
3168 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3169 attn_bits_ack);
a2fbb9ea
ET
3170 u32 attn_state = bp->attn_state;
3171
3172 /* look for changed bits */
3173 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3174 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3175
3176 DP(NETIF_MSG_HW,
3177 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3178 attn_bits, attn_ack, asserted, deasserted);
3179
3180 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3181 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3182
3183 /* handle bits that were raised */
3184 if (asserted)
3185 bnx2x_attn_int_asserted(bp, asserted);
3186
3187 if (deasserted)
3188 bnx2x_attn_int_deasserted(bp, deasserted);
3189}
3190
3191static void bnx2x_sp_task(struct work_struct *work)
3192{
1cf167f2 3193 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3194 u16 status;
3195
34f80b04 3196
a2fbb9ea
ET
3197 /* Return here if interrupt is disabled */
3198 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3199 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3200 return;
3201 }
3202
3203 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3204/* if (status == 0) */
3205/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3206
3196a88a 3207 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3208
877e9aa4
ET
3209 /* HW attentions */
3210 if (status & 0x1)
a2fbb9ea 3211 bnx2x_attn_int(bp);
a2fbb9ea 3212
68d59484 3213 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3214 IGU_INT_NOP, 1);
3215 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3216 IGU_INT_NOP, 1);
3217 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3218 IGU_INT_NOP, 1);
3219 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3220 IGU_INT_NOP, 1);
3221 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3222 IGU_INT_ENABLE, 1);
877e9aa4 3223
a2fbb9ea
ET
3224}
3225
3226static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3227{
3228 struct net_device *dev = dev_instance;
3229 struct bnx2x *bp = netdev_priv(dev);
3230
3231 /* Return here if interrupt is disabled */
3232 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3233 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3234 return IRQ_HANDLED;
3235 }
3236
8d9c5f34 3237 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3238
3239#ifdef BNX2X_STOP_ON_ERROR
3240 if (unlikely(bp->panic))
3241 return IRQ_HANDLED;
3242#endif
3243
1cf167f2 3244 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3245
3246 return IRQ_HANDLED;
3247}
3248
3249/* end of slow path */
3250
3251/* Statistics */
3252
3253/****************************************************************************
3254* Macros
3255****************************************************************************/
3256
a2fbb9ea
ET
3257/* sum[hi:lo] += add[hi:lo] */
3258#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3259 do { \
3260 s_lo += a_lo; \
f5ba6772 3261 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3262 } while (0)
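/* Worked example (illustrative, not part of the original driver): the
 * (s_lo < a_lo) test detects 32-bit wrap-around after the add, e.g.
 *
 *	s = 0x00000000:ffffffff, a = 0x00000000:00000001
 *	s_lo += a_lo  ->  s_lo = 0x00000000 < a_lo, so carry 1
 *	s_hi = 0 + 0 + 1  ->  result 0x00000001:00000000
 */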
3263
3264/* difference = minuend - subtrahend */
3265#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3266 do { \
bb2a0f7a
YG
3267 if (m_lo < s_lo) { \
3268 /* underflow */ \
a2fbb9ea 3269 d_hi = m_hi - s_hi; \
bb2a0f7a 3270 if (d_hi > 0) { \
6378c025 3271 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
3272 d_hi--; \
3273 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3274 } else { \
6378c025 3275 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3276 d_hi = 0; \
3277 d_lo = 0; \
3278 } \
bb2a0f7a
YG
3279 } else { \
3280 /* m_lo >= s_lo */ \
a2fbb9ea 3281 if (m_hi < s_hi) { \
bb2a0f7a
YG
3282 d_hi = 0; \
3283 d_lo = 0; \
3284 } else { \
6378c025 3285 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3286 d_hi = m_hi - s_hi; \
3287 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3288 } \
3289 } \
3290 } while (0)
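/* Worked example (illustrative, not part of the original driver), for
 * the borrow path where m_lo < s_lo:
 *
 *	m = 0x00000002:00000001, s = 0x00000001:00000005
 *	d_hi = 2 - 1 = 1 > 0, so borrow: d_hi = 0,
 *	d_lo = 1 + (UINT_MAX - 5) + 1 = 0xfffffffc
 *
 * i.e. the true difference 0x00000000:fffffffc. A negative true
 * difference collapses to 0:0 by design.
 */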
3291
bb2a0f7a 3292#define UPDATE_STAT64(s, t) \
a2fbb9ea 3293 do { \
bb2a0f7a
YG
3294 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3295 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3296 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3297 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3298 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3299 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3300 } while (0)
3301
bb2a0f7a 3302#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3303 do { \
bb2a0f7a
YG
3304 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3305 diff.lo, new->s##_lo, old->s##_lo); \
3306 ADD_64(estats->t##_hi, diff.hi, \
3307 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3308 } while (0)
3309
3310/* sum[hi:lo] += add */
3311#define ADD_EXTEND_64(s_hi, s_lo, a) \
3312 do { \
3313 s_lo += a; \
3314 s_hi += (s_lo < a) ? 1 : 0; \
3315 } while (0)
3316
bb2a0f7a 3317#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3318 do { \
bb2a0f7a
YG
3319 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3320 pstats->mac_stx[1].s##_lo, \
3321 new->s); \
a2fbb9ea
ET
3322 } while (0)
3323
bb2a0f7a 3324#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3325 do { \
4781bfad
EG
3326 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3327 old_tclient->s = tclient->s; \
de832a55
EG
3328 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3329 } while (0)
3330
3331#define UPDATE_EXTEND_USTAT(s, t) \
3332 do { \
3333 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3334 old_uclient->s = uclient->s; \
3335 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3336 } while (0)
3337
3338#define UPDATE_EXTEND_XSTAT(s, t) \
3339 do { \
4781bfad
EG
3340 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3341 old_xclient->s = xclient->s; \
de832a55
EG
3342 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3343 } while (0)
3344
3345/* minuend -= subtrahend */
3346#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3347 do { \
3348 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3349 } while (0)
3350
3351/* minuend[hi:lo] -= subtrahend */
3352#define SUB_EXTEND_64(m_hi, m_lo, s) \
3353 do { \
3354 SUB_64(m_hi, 0, m_lo, s); \
3355 } while (0)
3356
3357#define SUB_EXTEND_USTAT(s, t) \
3358 do { \
3359 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3360 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3361 } while (0)
3362
3363/*
3364 * General service functions
3365 */
3366
3367static inline long bnx2x_hilo(u32 *hiref)
3368{
3369 u32 lo = *(hiref + 1);
3370#if (BITS_PER_LONG == 64)
3371 u32 hi = *hiref;
3372
3373 return HILO_U64(hi, lo);
3374#else
3375 return lo;
3376#endif
3377}
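/* NOTE (illustrative, not part of the original driver): hiref points at
 * a hi/lo pair laid out hi-first, so on 64-bit builds this returns
 * HILO_U64(hiref[0], hiref[1]); on 32-bit builds the high word is
 * dropped and only the low 32 bits are reported.
 */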
3378
3379/*
3380 * Init service functions
3381 */
3382
bb2a0f7a
YG
3383static void bnx2x_storm_stats_post(struct bnx2x *bp)
3384{
3385 if (!bp->stats_pending) {
3386 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3387 int i, rc;
bb2a0f7a
YG
3388
3389 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3390 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3391 for_each_queue(bp, i)
3392 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3393
3394 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3395 ((u32 *)&ramrod_data)[1],
3396 ((u32 *)&ramrod_data)[0], 0);
3397 if (rc == 0) {
 3398 /* stats ramrod has its own slot on the spq */
3399 bp->spq_left++;
3400 bp->stats_pending = 1;
3401 }
3402 }
3403}
3404
bb2a0f7a
YG
3405static void bnx2x_hw_stats_post(struct bnx2x *bp)
3406{
3407 struct dmae_command *dmae = &bp->stats_dmae;
3408 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3409
3410 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3411 if (CHIP_REV_IS_SLOW(bp))
3412 return;
bb2a0f7a
YG
3413
3414 /* loader */
3415 if (bp->executer_idx) {
3416 int loader_idx = PMF_DMAE_C(bp);
3417
3418 memset(dmae, 0, sizeof(struct dmae_command));
3419
3420 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3422 DMAE_CMD_DST_RESET |
3423#ifdef __BIG_ENDIAN
3424 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425#else
3426 DMAE_CMD_ENDIANITY_DW_SWAP |
3427#endif
3428 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3429 DMAE_CMD_PORT_0) |
3430 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3431 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3432 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3433 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3434 sizeof(struct dmae_command) *
3435 (loader_idx + 1)) >> 2;
3436 dmae->dst_addr_hi = 0;
3437 dmae->len = sizeof(struct dmae_command) >> 2;
3438 if (CHIP_IS_E1(bp))
3439 dmae->len--;
3440 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3441 dmae->comp_addr_hi = 0;
3442 dmae->comp_val = 1;
3443
3444 *stats_comp = 0;
3445 bnx2x_post_dmae(bp, dmae, loader_idx);
3446
3447 } else if (bp->func_stx) {
3448 *stats_comp = 0;
3449 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3450 }
3451}
3452
3453static int bnx2x_stats_comp(struct bnx2x *bp)
3454{
3455 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3456 int cnt = 10;
3457
3458 might_sleep();
3459 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3460 if (!cnt) {
 3461 BNX2X_ERR("timeout waiting for stats to finish\n");
3462 break;
3463 }
3464 cnt--;
12469401 3465 msleep(1);
bb2a0f7a
YG
3466 }
3467 return 1;
3468}
3469
3470/*
3471 * Statistics service functions
3472 */
3473
3474static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3475{
3476 struct dmae_command *dmae;
3477 u32 opcode;
3478 int loader_idx = PMF_DMAE_C(bp);
3479 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480
3481 /* sanity */
3482 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3483 BNX2X_ERR("BUG!\n");
3484 return;
3485 }
3486
3487 bp->executer_idx = 0;
3488
3489 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3490 DMAE_CMD_C_ENABLE |
3491 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3492#ifdef __BIG_ENDIAN
3493 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3494#else
3495 DMAE_CMD_ENDIANITY_DW_SWAP |
3496#endif
3497 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3498 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3499
3500 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3501 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3502 dmae->src_addr_lo = bp->port.port_stx >> 2;
3503 dmae->src_addr_hi = 0;
3504 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3505 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3506 dmae->len = DMAE_LEN32_RD_MAX;
3507 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3508 dmae->comp_addr_hi = 0;
3509 dmae->comp_val = 1;
3510
3511 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3512 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3513 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3514 dmae->src_addr_hi = 0;
3515 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3516 DMAE_LEN32_RD_MAX * 4);
3517 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3518 DMAE_LEN32_RD_MAX * 4);
3519 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3520 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3521 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3522 dmae->comp_val = DMAE_COMP_VAL;
3523
3524 *stats_comp = 0;
3525 bnx2x_hw_stats_post(bp);
3526 bnx2x_stats_comp(bp);
3527}
3528
3529static void bnx2x_port_stats_init(struct bnx2x *bp)
3530{
3531 struct dmae_command *dmae;
34f80b04 3532 int port = BP_PORT(bp);
bb2a0f7a 3533 int vn = BP_E1HVN(bp);
a2fbb9ea 3534 u32 opcode;
bb2a0f7a 3535 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3536 u32 mac_addr;
3537 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3538
3539 /* sanity */
3540 if (!bp->link_vars.link_up || !bp->port.pmf) {
3541 BNX2X_ERR("BUG!\n");
3542 return;
3543 }
3544
3545 bp->executer_idx = 0;
3546
3547 /* MCP */
3548 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3549 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3550 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3551#ifdef __BIG_ENDIAN
bb2a0f7a 3552 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3553#else
bb2a0f7a 3554 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3555#endif
3556 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3557 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3558
bb2a0f7a 3559 if (bp->port.port_stx) {
3560
3561 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3562 dmae->opcode = opcode;
3563 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3564 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3565 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3566 dmae->dst_addr_hi = 0;
3567 dmae->len = sizeof(struct host_port_stats) >> 2;
3568 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3569 dmae->comp_addr_hi = 0;
3570 dmae->comp_val = 1;
3571 }
3572
3573 if (bp->func_stx) {
3574
3575 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3576 dmae->opcode = opcode;
3577 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3578 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3579 dmae->dst_addr_lo = bp->func_stx >> 2;
3580 dmae->dst_addr_hi = 0;
3581 dmae->len = sizeof(struct host_func_stats) >> 2;
3582 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3583 dmae->comp_addr_hi = 0;
3584 dmae->comp_val = 1;
3585 }
3586
bb2a0f7a 3587 /* MAC */
3588 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3589 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3590 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3591#ifdef __BIG_ENDIAN
3592 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3593#else
3594 DMAE_CMD_ENDIANITY_DW_SWAP |
3595#endif
3596 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3597 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3598
c18487ee 3599 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3600
3601 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3602 NIG_REG_INGRESS_BMAC0_MEM);
3603
3604 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3605 BIGMAC_REGISTER_TX_STAT_GTBYT */
3606 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3607 dmae->opcode = opcode;
3608 dmae->src_addr_lo = (mac_addr +
3609 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3610 dmae->src_addr_hi = 0;
3611 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3612 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3613 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3614 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3615 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616 dmae->comp_addr_hi = 0;
3617 dmae->comp_val = 1;
3618
3619 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3620 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3621 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3622 dmae->opcode = opcode;
3623 dmae->src_addr_lo = (mac_addr +
3624 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3625 dmae->src_addr_hi = 0;
3626 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3627 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3628 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3629 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3630 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3631 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3632 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3633 dmae->comp_addr_hi = 0;
3634 dmae->comp_val = 1;
3635
c18487ee 3636 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3637
3638 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3639
3640 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3641 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642 dmae->opcode = opcode;
3643 dmae->src_addr_lo = (mac_addr +
3644 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3645 dmae->src_addr_hi = 0;
3646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3648 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3649 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3650 dmae->comp_addr_hi = 0;
3651 dmae->comp_val = 1;
3652
3653 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3654 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3655 dmae->opcode = opcode;
3656 dmae->src_addr_lo = (mac_addr +
3657 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3658 dmae->src_addr_hi = 0;
3659 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3660 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3661 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3662 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3663 dmae->len = 1;
3664 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3665 dmae->comp_addr_hi = 0;
3666 dmae->comp_val = 1;
3667
3668 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3669 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3670 dmae->opcode = opcode;
3671 dmae->src_addr_lo = (mac_addr +
3672 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3673 dmae->src_addr_hi = 0;
3674 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3675 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3676 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3677 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3678 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3681 dmae->comp_val = 1;
3682 }
3683
3684 /* NIG */
3685 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3686 dmae->opcode = opcode;
3687 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3688 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3689 dmae->src_addr_hi = 0;
3690 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3691 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3692 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3693 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3694 dmae->comp_addr_hi = 0;
3695 dmae->comp_val = 1;
3696
3697 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698 dmae->opcode = opcode;
3699 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3700 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3701 dmae->src_addr_hi = 0;
3702 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3703 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3704 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3705 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3706 dmae->len = (2*sizeof(u32)) >> 2;
3707 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3708 dmae->comp_addr_hi = 0;
3709 dmae->comp_val = 1;
3710
3711 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3712 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3713 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3714 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3715#ifdef __BIG_ENDIAN
3716 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3717#else
3718 DMAE_CMD_ENDIANITY_DW_SWAP |
3719#endif
3720 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3721 (vn << DMAE_CMD_E1HVN_SHIFT));
3722 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3723 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3724 dmae->src_addr_hi = 0;
3725 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3726 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3727 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3728 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3729 dmae->len = (2*sizeof(u32)) >> 2;
3730 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3731 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3732 dmae->comp_val = DMAE_COMP_VAL;
3733
3734 *stats_comp = 0;
3735}
3736
bb2a0f7a 3737static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3738{
3739 struct dmae_command *dmae = &bp->stats_dmae;
3740 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3741
3742 /* sanity */
3743 if (!bp->func_stx) {
3744 BNX2X_ERR("BUG!\n");
3745 return;
3746 }
a2fbb9ea 3747
3748 bp->executer_idx = 0;
3749 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3750
3751 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3752 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3753 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3754#ifdef __BIG_ENDIAN
3755 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3756#else
3757 DMAE_CMD_ENDIANITY_DW_SWAP |
3758#endif
3759 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3760 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3761 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3762 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3763 dmae->dst_addr_lo = bp->func_stx >> 2;
3764 dmae->dst_addr_hi = 0;
3765 dmae->len = sizeof(struct host_func_stats) >> 2;
3766 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3767 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3768 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3769
3770 *stats_comp = 0;
3771}
a2fbb9ea 3772
3773static void bnx2x_stats_start(struct bnx2x *bp)
3774{
3775 if (bp->port.pmf)
3776 bnx2x_port_stats_init(bp);
3777
3778 else if (bp->func_stx)
3779 bnx2x_func_stats_init(bp);
3780
3781 bnx2x_hw_stats_post(bp);
3782 bnx2x_storm_stats_post(bp);
3783}
3784
3785static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3786{
3787 bnx2x_stats_comp(bp);
3788 bnx2x_stats_pmf_update(bp);
3789 bnx2x_stats_start(bp);
3790}
3791
3792static void bnx2x_stats_restart(struct bnx2x *bp)
3793{
3794 bnx2x_stats_comp(bp);
3795 bnx2x_stats_start(bp);
3796}
3797
3798static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3799{
3800 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3801 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3802 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3803 struct {
3804 u32 lo;
3805 u32 hi;
3806 } diff;
3807
3808 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3809 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3810 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3811 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3812 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3813 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3814 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3815 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3816 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3817 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3818 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3819 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3820 UPDATE_STAT64(tx_stat_gt127,
3821 tx_stat_etherstatspkts65octetsto127octets);
3822 UPDATE_STAT64(tx_stat_gt255,
3823 tx_stat_etherstatspkts128octetsto255octets);
3824 UPDATE_STAT64(tx_stat_gt511,
3825 tx_stat_etherstatspkts256octetsto511octets);
3826 UPDATE_STAT64(tx_stat_gt1023,
3827 tx_stat_etherstatspkts512octetsto1023octets);
3828 UPDATE_STAT64(tx_stat_gt1518,
3829 tx_stat_etherstatspkts1024octetsto1522octets);
3830 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3831 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3832 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3833 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3834 UPDATE_STAT64(tx_stat_gterr,
3835 tx_stat_dot3statsinternalmactransmiterrors);
3836 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3837
3838 estats->pause_frames_received_hi =
3839 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3840 estats->pause_frames_received_lo =
3841 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3842
3843 estats->pause_frames_sent_hi =
3844 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3845 estats->pause_frames_sent_lo =
3846 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3847}
3848
3849static void bnx2x_emac_stats_update(struct bnx2x *bp)
3850{
3851 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3852 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3853 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3854
3855 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3856 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3857 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3858 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3859 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3860 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3861 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3862 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3863 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3864 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3865 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3866 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3867 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3868 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3869 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3870 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3871 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3872 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3873 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3874 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3875 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3876 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3877 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3878 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3879 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3880 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3881 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3882 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3883 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3885 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3886
3887 estats->pause_frames_received_hi =
3888 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3889 estats->pause_frames_received_lo =
3890 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3891 ADD_64(estats->pause_frames_received_hi,
3892 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3893 estats->pause_frames_received_lo,
3894 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3895
3896 estats->pause_frames_sent_hi =
3897 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3898 estats->pause_frames_sent_lo =
3899 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3900 ADD_64(estats->pause_frames_sent_hi,
3901 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3902 estats->pause_frames_sent_lo,
3903 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3904}
3905
3906static int bnx2x_hw_stats_update(struct bnx2x *bp)
3907{
3908 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3909 struct nig_stats *old = &(bp->port.old_nig_stats);
3910 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3911 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3912 struct {
3913 u32 lo;
3914 u32 hi;
3915 } diff;
de832a55 3916 u32 nig_timer_max;
3917
3918 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3919 bnx2x_bmac_stats_update(bp);
3920
3921 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3922 bnx2x_emac_stats_update(bp);
3923
3924 else { /* unreached */
c3eefaf6 3925 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3926 return -1;
3927 }
a2fbb9ea 3928
3929 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3930 new->brb_discard - old->brb_discard);
3931 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3932 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3933
3934 UPDATE_STAT64_NIG(egress_mac_pkt0,
3935 etherstatspkts1024octetsto1522octets);
3936 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3937
bb2a0f7a 3938 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3939
3940 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3941 sizeof(struct mac_stx));
3942 estats->brb_drop_hi = pstats->brb_drop_hi;
3943 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3944
bb2a0f7a 3945 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3946
3947 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3948 if (nig_timer_max != estats->nig_timer_max) {
3949 estats->nig_timer_max = nig_timer_max;
3950 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3951 }
3952
bb2a0f7a 3953 return 0;
3954}
3955
bb2a0f7a 3956static int bnx2x_storm_stats_update(struct bnx2x *bp)
3957{
3958 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3959 struct tstorm_per_port_stats *tport =
de832a55 3960 &stats->tstorm_common.port_statistics;
3961 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3962 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3963 int i;
3964
3965 memcpy(&(fstats->total_bytes_received_hi),
3966 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3967 sizeof(struct host_func_stats) - 2*sizeof(u32));
3968 estats->error_bytes_received_hi = 0;
3969 estats->error_bytes_received_lo = 0;
3970 estats->etherstatsoverrsizepkts_hi = 0;
3971 estats->etherstatsoverrsizepkts_lo = 0;
3972 estats->no_buff_discard_hi = 0;
3973 estats->no_buff_discard_lo = 0;
a2fbb9ea 3974
ca00392c 3975 for_each_rx_queue(bp, i) {
3976 struct bnx2x_fastpath *fp = &bp->fp[i];
3977 int cl_id = fp->cl_id;
3978 struct tstorm_per_client_stats *tclient =
3979 &stats->tstorm_common.client_statistics[cl_id];
3980 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3981 struct ustorm_per_client_stats *uclient =
3982 &stats->ustorm_common.client_statistics[cl_id];
3983 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3984 struct xstorm_per_client_stats *xclient =
3985 &stats->xstorm_common.client_statistics[cl_id];
3986 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3987 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3988 u32 diff;
3989
3990 /* are storm stats valid? */
3991 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3992 bp->stats_counter) {
3993 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3994 " xstorm counter (%d) != stats_counter (%d)\n",
3995 i, xclient->stats_counter, bp->stats_counter);
3996 return -1;
3997 }
3998 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3999 bp->stats_counter) {
4000 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4001 " tstorm counter (%d) != stats_counter (%d)\n",
4002 i, tclient->stats_counter, bp->stats_counter);
4003 return -2;
4004 }
4005 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4006 bp->stats_counter) {
4007 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4008 " ustorm counter (%d) != stats_counter (%d)\n",
4009 i, uclient->stats_counter, bp->stats_counter);
4010 return -4;
4011 }
a2fbb9ea 4012
de832a55 4013 qstats->total_bytes_received_hi =
ca00392c 4014 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4015 qstats->total_bytes_received_lo =
4016 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4017
4018 ADD_64(qstats->total_bytes_received_hi,
4019 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4020 qstats->total_bytes_received_lo,
4021 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4022
4023 ADD_64(qstats->total_bytes_received_hi,
4024 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4025 qstats->total_bytes_received_lo,
4026 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4027
4028 qstats->valid_bytes_received_hi =
4029 qstats->total_bytes_received_hi;
de832a55 4030 qstats->valid_bytes_received_lo =
ca00392c 4031 qstats->total_bytes_received_lo;
bb2a0f7a 4032
de832a55 4033 qstats->error_bytes_received_hi =
bb2a0f7a 4034 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4035 qstats->error_bytes_received_lo =
bb2a0f7a 4036 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4037
4038 ADD_64(qstats->total_bytes_received_hi,
4039 qstats->error_bytes_received_hi,
4040 qstats->total_bytes_received_lo,
4041 qstats->error_bytes_received_lo);
4042
4043 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4044 total_unicast_packets_received);
4045 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4046 total_multicast_packets_received);
4047 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4048 total_broadcast_packets_received);
4049 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4050 etherstatsoverrsizepkts);
4051 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4052
4053 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4054 total_unicast_packets_received);
4055 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4056 total_multicast_packets_received);
4057 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4058 total_broadcast_packets_received);
4059 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4060 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4061 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4062
4063 qstats->total_bytes_transmitted_hi =
ca00392c 4064 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4065 qstats->total_bytes_transmitted_lo =
4066 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4067
4068 ADD_64(qstats->total_bytes_transmitted_hi,
4069 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4070 qstats->total_bytes_transmitted_lo,
4071 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4072
4073 ADD_64(qstats->total_bytes_transmitted_hi,
4074 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4075 qstats->total_bytes_transmitted_lo,
4076 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4077
4078 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4079 total_unicast_packets_transmitted);
4080 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4081 total_multicast_packets_transmitted);
4082 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4083 total_broadcast_packets_transmitted);
4084
4085 old_tclient->checksum_discard = tclient->checksum_discard;
4086 old_tclient->ttl0_discard = tclient->ttl0_discard;
4087
4088 ADD_64(fstats->total_bytes_received_hi,
4089 qstats->total_bytes_received_hi,
4090 fstats->total_bytes_received_lo,
4091 qstats->total_bytes_received_lo);
4092 ADD_64(fstats->total_bytes_transmitted_hi,
4093 qstats->total_bytes_transmitted_hi,
4094 fstats->total_bytes_transmitted_lo,
4095 qstats->total_bytes_transmitted_lo);
4096 ADD_64(fstats->total_unicast_packets_received_hi,
4097 qstats->total_unicast_packets_received_hi,
4098 fstats->total_unicast_packets_received_lo,
4099 qstats->total_unicast_packets_received_lo);
4100 ADD_64(fstats->total_multicast_packets_received_hi,
4101 qstats->total_multicast_packets_received_hi,
4102 fstats->total_multicast_packets_received_lo,
4103 qstats->total_multicast_packets_received_lo);
4104 ADD_64(fstats->total_broadcast_packets_received_hi,
4105 qstats->total_broadcast_packets_received_hi,
4106 fstats->total_broadcast_packets_received_lo,
4107 qstats->total_broadcast_packets_received_lo);
4108 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4109 qstats->total_unicast_packets_transmitted_hi,
4110 fstats->total_unicast_packets_transmitted_lo,
4111 qstats->total_unicast_packets_transmitted_lo);
4112 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4113 qstats->total_multicast_packets_transmitted_hi,
4114 fstats->total_multicast_packets_transmitted_lo,
4115 qstats->total_multicast_packets_transmitted_lo);
4116 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4117 qstats->total_broadcast_packets_transmitted_hi,
4118 fstats->total_broadcast_packets_transmitted_lo,
4119 qstats->total_broadcast_packets_transmitted_lo);
4120 ADD_64(fstats->valid_bytes_received_hi,
4121 qstats->valid_bytes_received_hi,
4122 fstats->valid_bytes_received_lo,
4123 qstats->valid_bytes_received_lo);
4124
4125 ADD_64(estats->error_bytes_received_hi,
4126 qstats->error_bytes_received_hi,
4127 estats->error_bytes_received_lo,
4128 qstats->error_bytes_received_lo);
4129 ADD_64(estats->etherstatsoverrsizepkts_hi,
4130 qstats->etherstatsoverrsizepkts_hi,
4131 estats->etherstatsoverrsizepkts_lo,
4132 qstats->etherstatsoverrsizepkts_lo);
4133 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4134 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4135 }
4136
4137 ADD_64(fstats->total_bytes_received_hi,
4138 estats->rx_stat_ifhcinbadoctets_hi,
4139 fstats->total_bytes_received_lo,
4140 estats->rx_stat_ifhcinbadoctets_lo);
4141
4142 memcpy(estats, &(fstats->total_bytes_received_hi),
4143 sizeof(struct host_func_stats) - 2*sizeof(u32));
4144
4145 ADD_64(estats->etherstatsoverrsizepkts_hi,
4146 estats->rx_stat_dot3statsframestoolong_hi,
4147 estats->etherstatsoverrsizepkts_lo,
4148 estats->rx_stat_dot3statsframestoolong_lo);
4149 ADD_64(estats->error_bytes_received_hi,
4150 estats->rx_stat_ifhcinbadoctets_hi,
4151 estats->error_bytes_received_lo,
4152 estats->rx_stat_ifhcinbadoctets_lo);
4153
4154 if (bp->port.pmf) {
4155 estats->mac_filter_discard =
4156 le32_to_cpu(tport->mac_filter_discard);
4157 estats->xxoverflow_discard =
4158 le32_to_cpu(tport->xxoverflow_discard);
4159 estats->brb_truncate_discard =
bb2a0f7a 4160 le32_to_cpu(tport->brb_truncate_discard);
4161 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4162 }
4163
4164 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4165
4166 bp->stats_pending = 0;
4167
4168 return 0;
4169}
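/*
 * Sketch (not from the driver) of the freshness check at the top of the
 * per-queue loop above: the driver bumps a 16-bit counter with every
 * STAT_QUERY ramrod and the firmware echoes the value it serviced.
 * Truncating the +1 to u16 keeps the comparison correct across wrap.
 */
#include <stdint.h>
#include <stdio.h>

static int stats_fresh(uint16_t fw_counter, uint16_t drv_counter)
{
	return (uint16_t)(fw_counter + 1) == drv_counter;
}

int main(void)
{
	printf("%d\n", stats_fresh(41, 42));	/* 1: up to date */
	printf("%d\n", stats_fresh(40, 42));	/* 0: stale */
	printf("%d\n", stats_fresh(0xffff, 0));	/* 1: survives wraparound */
	return 0;
}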
4170
bb2a0f7a 4171static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4172{
bb2a0f7a 4173 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4174 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4175 int i;
4176
4177 nstats->rx_packets =
4178 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4179 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4180 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4181
4182 nstats->tx_packets =
4183 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4184 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4185 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4186
de832a55 4187 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4188
0e39e645 4189 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4190
de832a55 4191 nstats->rx_dropped = estats->mac_discard;
ca00392c 4192 for_each_rx_queue(bp, i)
4193 nstats->rx_dropped +=
4194 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4195
4196 nstats->tx_dropped = 0;
4197
4198 nstats->multicast =
de832a55 4199 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4200
bb2a0f7a 4201 nstats->collisions =
de832a55 4202 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4203
4204 nstats->rx_length_errors =
4205 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4206 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4207 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4208 bnx2x_hilo(&estats->brb_truncate_hi);
4209 nstats->rx_crc_errors =
4210 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4211 nstats->rx_frame_errors =
4212 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4213 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4214 nstats->rx_missed_errors = estats->xxoverflow_discard;
4215
4216 nstats->rx_errors = nstats->rx_length_errors +
4217 nstats->rx_over_errors +
4218 nstats->rx_crc_errors +
4219 nstats->rx_frame_errors +
4220 nstats->rx_fifo_errors +
4221 nstats->rx_missed_errors;
a2fbb9ea 4222
bb2a0f7a 4223 nstats->tx_aborted_errors =
4224 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4225 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4226 nstats->tx_carrier_errors =
4227 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4228 nstats->tx_fifo_errors = 0;
4229 nstats->tx_heartbeat_errors = 0;
4230 nstats->tx_window_errors = 0;
4231
4232 nstats->tx_errors = nstats->tx_aborted_errors +
4233 nstats->tx_carrier_errors +
4234 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4235}
4236
4237static void bnx2x_drv_stats_update(struct bnx2x *bp)
4238{
4239 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4240 int i;
4241
4242 estats->driver_xoff = 0;
4243 estats->rx_err_discard_pkt = 0;
4244 estats->rx_skb_alloc_failed = 0;
4245 estats->hw_csum_err = 0;
ca00392c 4246 for_each_rx_queue(bp, i) {
4247 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4248
4249 estats->driver_xoff += qstats->driver_xoff;
4250 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4251 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4252 estats->hw_csum_err += qstats->hw_csum_err;
4253 }
4254}
4255
bb2a0f7a 4256static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4257{
bb2a0f7a 4258 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4259
4260 if (*stats_comp != DMAE_COMP_VAL)
4261 return;
4262
4263 if (bp->port.pmf)
de832a55 4264 bnx2x_hw_stats_update(bp);
a2fbb9ea 4265
4266 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4267 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4268 bnx2x_panic();
4269 return;
4270 }
4271
4272 bnx2x_net_stats_update(bp);
4273 bnx2x_drv_stats_update(bp);
4274
a2fbb9ea 4275 if (bp->msglevel & NETIF_MSG_TIMER) {
4276 struct bnx2x_fastpath *fp0_rx = bp->fp;
4277 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4278 struct tstorm_per_client_stats *old_tclient =
4279 &bp->fp->old_tclient;
4280 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4281 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4282 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4283 int i;
4284
4285 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4286 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4287 " tx pkt (%lx)\n",
4288 bnx2x_tx_avail(fp0_tx),
4289 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4290 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4291 " rx pkt (%lx)\n",
4292 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4293 fp0_rx->rx_comp_cons),
4294 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4295 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4296 "brb truncate %u\n",
4297 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4298 qstats->driver_xoff,
4299 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4300 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4301 "packets_too_big_discard %lu no_buff_discard %lu "
4302 "mac_discard %u mac_filter_discard %u "
4303 "xxovrflow_discard %u brb_truncate_discard %u "
4304 "ttl0_discard %u\n",
4781bfad 4305 le32_to_cpu(old_tclient->checksum_discard),
4306 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4307 bnx2x_hilo(&qstats->no_buff_discard_hi),
4308 estats->mac_discard, estats->mac_filter_discard,
4309 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4310 le32_to_cpu(old_tclient->ttl0_discard));
4311
4312 for_each_queue(bp, i) {
4313 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4314 bnx2x_fp(bp, i, tx_pkt),
4315 bnx2x_fp(bp, i, rx_pkt),
4316 bnx2x_fp(bp, i, rx_calls));
4317 }
4318 }
4319
4320 bnx2x_hw_stats_post(bp);
4321 bnx2x_storm_stats_post(bp);
4322}
a2fbb9ea 4323
4324static void bnx2x_port_stats_stop(struct bnx2x *bp)
4325{
4326 struct dmae_command *dmae;
4327 u32 opcode;
4328 int loader_idx = PMF_DMAE_C(bp);
4329 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4330
bb2a0f7a 4331 bp->executer_idx = 0;
a2fbb9ea 4332
4333 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4334 DMAE_CMD_C_ENABLE |
4335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4336#ifdef __BIG_ENDIAN
bb2a0f7a 4337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4338#else
bb2a0f7a 4339 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4340#endif
4341 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4342 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4343
4344 if (bp->port.port_stx) {
4345
4346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4347 if (bp->func_stx)
4348 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4349 else
4350 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4351 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4352 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4353 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4354 dmae->dst_addr_hi = 0;
4355 dmae->len = sizeof(struct host_port_stats) >> 2;
4356 if (bp->func_stx) {
4357 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4358 dmae->comp_addr_hi = 0;
4359 dmae->comp_val = 1;
4360 } else {
4361 dmae->comp_addr_lo =
4362 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4363 dmae->comp_addr_hi =
4364 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4365 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4366
4367 *stats_comp = 0;
4368 }
4369 }
4370
4371 if (bp->func_stx) {
4372
4373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4374 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4377 dmae->dst_addr_lo = bp->func_stx >> 2;
4378 dmae->dst_addr_hi = 0;
4379 dmae->len = sizeof(struct host_func_stats) >> 2;
4380 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4381 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4382 dmae->comp_val = DMAE_COMP_VAL;
4383
4384 *stats_comp = 0;
a2fbb9ea 4385 }
4386}
4387
4388static void bnx2x_stats_stop(struct bnx2x *bp)
4389{
4390 int update = 0;
4391
4392 bnx2x_stats_comp(bp);
4393
4394 if (bp->port.pmf)
4395 update = (bnx2x_hw_stats_update(bp) == 0);
4396
4397 update |= (bnx2x_storm_stats_update(bp) == 0);
4398
4399 if (update) {
4400 bnx2x_net_stats_update(bp);
a2fbb9ea 4401
4402 if (bp->port.pmf)
4403 bnx2x_port_stats_stop(bp);
4404
4405 bnx2x_hw_stats_post(bp);
4406 bnx2x_stats_comp(bp);
4407 }
4408}
4409
4410static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4411{
4412}
4413
4414static const struct {
4415 void (*action)(struct bnx2x *bp);
4416 enum bnx2x_stats_state next_state;
4417} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4418/* state event */
4419{
4420/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4421/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4422/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4423/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4424},
4425{
4426/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4427/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4428/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4429/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4430}
4431};
4432
4433static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4434{
4435 enum bnx2x_stats_state state = bp->stats_state;
4436
4437 bnx2x_stats_stm[state][event].action(bp);
4438 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4439
4440 /* Make sure the state has been "changed" */
4441 smp_wmb();
4442
4443 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4444 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4445 state, event, bp->stats_state);
4446}
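/*
 * Sketch (not from the driver) of the table-driven dispatch that
 * bnx2x_stats_handle() performs on bnx2x_stats_stm[][]: every
 * (state, event) cell names an action plus the next state, so adding a
 * transition never touches control flow.
 */
#include <stdio.h>

enum demo_state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum demo_event { EV_PMF, EV_LINK_UP, EV_UPDATE, EV_STOP, EV_MAX };

static void act_nothing(void) { }
static void act_start(void) { puts("start"); }
static void act_update(void) { puts("update"); }
static void act_stop(void) { puts("stop"); }

static const struct {
	void (*action)(void);
	enum demo_state next;
} demo_stm[ST_MAX][EV_MAX] = {
	[ST_DISABLED] = {
		[EV_PMF]     = { act_nothing, ST_DISABLED },
		[EV_LINK_UP] = { act_start,   ST_ENABLED },
		[EV_UPDATE]  = { act_nothing, ST_DISABLED },
		[EV_STOP]    = { act_nothing, ST_DISABLED },
	},
	[ST_ENABLED] = {
		[EV_PMF]     = { act_start,   ST_ENABLED },
		[EV_LINK_UP] = { act_start,   ST_ENABLED },
		[EV_UPDATE]  = { act_update,  ST_ENABLED },
		[EV_STOP]    = { act_stop,    ST_DISABLED },
	},
};

int main(void)
{
	enum demo_state s = ST_DISABLED;
	enum demo_event seq[] = { EV_LINK_UP, EV_UPDATE, EV_STOP };
	unsigned i;

	for (i = 0; i < sizeof(seq)/sizeof(seq[0]); i++) {
		demo_stm[s][seq[i]].action();
		s = demo_stm[s][seq[i]].next;
	}
	return s == ST_DISABLED ? 0 : 1;
}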
4447
4448static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4449{
4450 struct dmae_command *dmae;
4451 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4452
4453 /* sanity */
4454 if (!bp->port.pmf || !bp->port.port_stx) {
4455 BNX2X_ERR("BUG!\n");
4456 return;
4457 }
4458
4459 bp->executer_idx = 0;
4460
4461 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4462 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4463 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4464 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4465#ifdef __BIG_ENDIAN
4466 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4467#else
4468 DMAE_CMD_ENDIANITY_DW_SWAP |
4469#endif
4470 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4471 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4472 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4473 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4474 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4475 dmae->dst_addr_hi = 0;
4476 dmae->len = sizeof(struct host_port_stats) >> 2;
4477 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4478 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4479 dmae->comp_val = DMAE_COMP_VAL;
4480
4481 *stats_comp = 0;
4482 bnx2x_hw_stats_post(bp);
4483 bnx2x_stats_comp(bp);
4484}
4485
4486static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4487{
4488 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4489 int port = BP_PORT(bp);
4490 int func;
4491 u32 func_stx;
4492
4493 /* sanity */
4494 if (!bp->port.pmf || !bp->func_stx) {
4495 BNX2X_ERR("BUG!\n");
4496 return;
4497 }
4498
4499 /* save our func_stx */
4500 func_stx = bp->func_stx;
4501
4502 for (vn = VN_0; vn < vn_max; vn++) {
4503 func = 2*vn + port;
4504
4505 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4506 bnx2x_func_stats_init(bp);
4507 bnx2x_hw_stats_post(bp);
4508 bnx2x_stats_comp(bp);
4509 }
4510
4511 /* restore our func_stx */
4512 bp->func_stx = func_stx;
4513}
4514
4515static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4516{
4517 struct dmae_command *dmae = &bp->stats_dmae;
4518 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4519
4520 /* sanity */
4521 if (!bp->func_stx) {
4522 BNX2X_ERR("BUG!\n");
4523 return;
4524 }
4525
4526 bp->executer_idx = 0;
4527 memset(dmae, 0, sizeof(struct dmae_command));
4528
4529 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4530 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4531 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4532#ifdef __BIG_ENDIAN
4533 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4534#else
4535 DMAE_CMD_ENDIANITY_DW_SWAP |
4536#endif
4537 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4538 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4539 dmae->src_addr_lo = bp->func_stx >> 2;
4540 dmae->src_addr_hi = 0;
4541 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4542 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4543 dmae->len = sizeof(struct host_func_stats) >> 2;
4544 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4545 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4546 dmae->comp_val = DMAE_COMP_VAL;
4547
4548 *stats_comp = 0;
4549 bnx2x_hw_stats_post(bp);
4550 bnx2x_stats_comp(bp);
4551}
4552
4553static void bnx2x_stats_init(struct bnx2x *bp)
4554{
4555 int port = BP_PORT(bp);
4556 int func = BP_FUNC(bp);
4557 int i;
4558
4559 bp->stats_pending = 0;
4560 bp->executer_idx = 0;
4561 bp->stats_counter = 0;
4562
4563 /* port and func stats for management */
4564 if (!BP_NOMCP(bp)) {
4565 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4566 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4567
4568 } else {
4569 bp->port.port_stx = 0;
4570 bp->func_stx = 0;
4571 }
4572 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4573 bp->port.port_stx, bp->func_stx);
4574
4575 /* port stats */
4576 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4577 bp->port.old_nig_stats.brb_discard =
4578 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4579 bp->port.old_nig_stats.brb_truncate =
4580 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4581 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4582 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4583 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4584 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4585
4586 /* function stats */
4587 for_each_queue(bp, i) {
4588 struct bnx2x_fastpath *fp = &bp->fp[i];
4589
4590 memset(&fp->old_tclient, 0,
4591 sizeof(struct tstorm_per_client_stats));
4592 memset(&fp->old_uclient, 0,
4593 sizeof(struct ustorm_per_client_stats));
4594 memset(&fp->old_xclient, 0,
4595 sizeof(struct xstorm_per_client_stats));
4596 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4597 }
4598
4599 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4600 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4601
4602 bp->stats_state = STATS_STATE_DISABLED;
4603
4604 if (bp->port.pmf) {
4605 if (bp->port.port_stx)
4606 bnx2x_port_stats_base_init(bp);
4607
4608 if (bp->func_stx)
4609 bnx2x_func_stats_base_init(bp);
4610
4611 } else if (bp->func_stx)
4612 bnx2x_func_stats_base_update(bp);
4613}
4614
4615static void bnx2x_timer(unsigned long data)
4616{
4617 struct bnx2x *bp = (struct bnx2x *) data;
4618
4619 if (!netif_running(bp->dev))
4620 return;
4621
4622 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4623 goto timer_restart;
4624
4625 if (poll) {
4626 struct bnx2x_fastpath *fp = &bp->fp[0];
4627 int rc;
4628
7961f791 4629 bnx2x_tx_int(fp);
4630 rc = bnx2x_rx_int(fp, 1000);
4631 }
4632
4633 if (!BP_NOMCP(bp)) {
4634 int func = BP_FUNC(bp);
4635 u32 drv_pulse;
4636 u32 mcp_pulse;
4637
4638 ++bp->fw_drv_pulse_wr_seq;
4639 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4640 /* TBD - add SYSTEM_TIME */
4641 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4642 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4643
34f80b04 4644 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4645 MCP_PULSE_SEQ_MASK);
4646 /* The delta between driver pulse and mcp response
4647 * should be 1 (before mcp response) or 0 (after mcp response)
4648 */
4649 if ((drv_pulse != mcp_pulse) &&
4650 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4651 /* someone lost a heartbeat... */
4652 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4653 drv_pulse, mcp_pulse);
4654 }
4655 }
4656
4657 if ((bp->state == BNX2X_STATE_OPEN) ||
4658 (bp->state == BNX2X_STATE_DISABLED))
4659 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4660
f1410647 4661timer_restart:
4662 mod_timer(&bp->timer, jiffies + bp->current_interval);
4663}
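/*
 * Sketch (not from the driver) of the heartbeat check in bnx2x_timer():
 * driver and management firmware (MCP) each advance a masked sequence
 * number, and the link is healthy when the driver's pulse equals the
 * MCP's echo or runs exactly one step ahead. The mask width here is
 * illustrative, not the real DRV_PULSE_SEQ_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PULSE_SEQ_MASK 0x7fffu

static int pulse_ok(uint32_t drv_pulse, uint32_t mcp_pulse)
{
	drv_pulse &= DEMO_PULSE_SEQ_MASK;
	mcp_pulse &= DEMO_PULSE_SEQ_MASK;
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & DEMO_PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d\n", pulse_ok(10, 10));	/* 1: MCP caught up */
	printf("%d\n", pulse_ok(11, 10));	/* 1: driver one ahead */
	printf("%d\n", pulse_ok(0, DEMO_PULSE_SEQ_MASK)); /* 1: across wrap */
	printf("%d\n", pulse_ok(13, 10));	/* 0: someone lost a heartbeat */
	return 0;
}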
4664
4665/* end of Statistics */
4666
4667/* nic init */
4668
4669/*
4670 * nic init service functions
4671 */
4672
34f80b04 4673static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4674{
4675 int port = BP_PORT(bp);
4676
4677 /* "CSTORM" */
4678 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4679 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4680 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4681 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4682 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4683 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4684}
4685
4686static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4687 dma_addr_t mapping, int sb_id)
4688{
4689 int port = BP_PORT(bp);
bb2a0f7a 4690 int func = BP_FUNC(bp);
a2fbb9ea 4691 int index;
34f80b04 4692 u64 section;
4693
4694 /* USTORM */
4695 section = ((u64)mapping) + offsetof(struct host_status_block,
4696 u_status_block);
34f80b04 4697 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4698
4699 REG_WR(bp, BAR_CSTRORM_INTMEM +
4700 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4701 REG_WR(bp, BAR_CSTRORM_INTMEM +
4702 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4703 U64_HI(section));
4704 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4705 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4706
4707 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4708 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4709 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4710
4711 /* CSTORM */
4712 section = ((u64)mapping) + offsetof(struct host_status_block,
4713 c_status_block);
34f80b04 4714 sb->c_status_block.status_block_id = sb_id;
4715
4716 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4717 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4718 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4719 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4720 U64_HI(section));
7a9b2557 4721 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4722 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4723
4724 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4725 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4726 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4727
4728 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4729}
4730
4731static void bnx2x_zero_def_sb(struct bnx2x *bp)
4732{
4733 int func = BP_FUNC(bp);
a2fbb9ea 4734
ca00392c 4735 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4736 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4737 sizeof(struct tstorm_def_status_block)/4);
4738 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4739 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4740 sizeof(struct cstorm_def_status_block_u)/4);
4741 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4742 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4743 sizeof(struct cstorm_def_status_block_c)/4);
4744 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4745 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4746 sizeof(struct xstorm_def_status_block)/4);
4747}
4748
4749static void bnx2x_init_def_sb(struct bnx2x *bp,
4750 struct host_def_status_block *def_sb,
34f80b04 4751 dma_addr_t mapping, int sb_id)
a2fbb9ea 4752{
4753 int port = BP_PORT(bp);
4754 int func = BP_FUNC(bp);
4755 int index, val, reg_offset;
4756 u64 section;
4757
4758 /* ATTN */
4759 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4760 atten_status_block);
34f80b04 4761 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4762
4763 bp->attn_state = 0;
4764
4765 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4766 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4767
34f80b04 4768 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4769 bp->attn_group[index].sig[0] = REG_RD(bp,
4770 reg_offset + 0x10*index);
4771 bp->attn_group[index].sig[1] = REG_RD(bp,
4772 reg_offset + 0x4 + 0x10*index);
4773 bp->attn_group[index].sig[2] = REG_RD(bp,
4774 reg_offset + 0x8 + 0x10*index);
4775 bp->attn_group[index].sig[3] = REG_RD(bp,
4776 reg_offset + 0xc + 0x10*index);
4777 }
4778
4779 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4780 HC_REG_ATTN_MSG0_ADDR_L);
4781
4782 REG_WR(bp, reg_offset, U64_LO(section));
4783 REG_WR(bp, reg_offset + 4, U64_HI(section));
4784
4785 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4786
4787 val = REG_RD(bp, reg_offset);
34f80b04 4788 val |= sb_id;
4789 REG_WR(bp, reg_offset, val);
4790
4791 /* USTORM */
4792 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4793 u_def_status_block);
34f80b04 4794 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4795
4796 REG_WR(bp, BAR_CSTRORM_INTMEM +
4797 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4798 REG_WR(bp, BAR_CSTRORM_INTMEM +
4799 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4800 U64_HI(section));
4801 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4802 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4803
4804 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4805 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4806 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4807
4808 /* CSTORM */
4809 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4810 c_def_status_block);
34f80b04 4811 def_sb->c_def_status_block.status_block_id = sb_id;
4812
4813 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4814 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4815 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4816 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4817 U64_HI(section));
5c862848 4818 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4819 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4820
4821 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4822 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4823 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4824
4825 /* TSTORM */
4826 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4827 t_def_status_block);
34f80b04 4828 def_sb->t_def_status_block.status_block_id = sb_id;
4829
4830 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4831 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4832 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4833 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4834 U64_HI(section));
5c862848 4835 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4836 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4837
4838 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4839 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4840 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4841
4842 /* XSTORM */
4843 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4844 x_def_status_block);
34f80b04 4845 def_sb->x_def_status_block.status_block_id = sb_id;
4846
4847 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4848 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4849 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4850 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4851 U64_HI(section));
5c862848 4852 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4853 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4854
4855 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4856 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4857 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4858
bb2a0f7a 4859 bp->stats_pending = 0;
66e855f3 4860 bp->set_mac_pending = 0;
bb2a0f7a 4861
34f80b04 4862 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4863}
4864
4865static void bnx2x_update_coalesce(struct bnx2x *bp)
4866{
34f80b04 4867 int port = BP_PORT(bp);
4868 int i;
4869
4870 for_each_queue(bp, i) {
34f80b04 4871 int sb_id = bp->fp[i].sb_id;
4872
4873 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4874 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4875 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4876 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4877 bp->rx_ticks/12);
4878 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4879 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4880 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4881 (bp->rx_ticks/12) ? 0 : 1);
4882
4883 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4884 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4885 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4886 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4887 bp->tx_ticks/12);
a2fbb9ea 4888 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4889 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4890 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4891 (bp->tx_ticks/12) ? 0 : 1);
4892 }
4893}
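/*
 * Sketch (not from the driver): the writes above scale rx_ticks/tx_ticks
 * (microseconds, as configured through ethtool coalescing) by 1/12
 * before programming the HC timeout, which suggests a 12 us hardware
 * tick; a resulting timeout of 0 also disables the index (the 0-or-1
 * value written to the HC_DISABLE offset). The unit is an inference
 * from the code, not a documented constant.
 */
#include <stdio.h>

static unsigned hc_timeout_units(unsigned usecs)
{
	return usecs / 12;	/* assumed 12 us per HC tick */
}

int main(void)
{
	unsigned rx_ticks = 25;	/* e.g. ethtool -C rx-usecs 25 */

	printf("timeout=%u disable=%d\n", hc_timeout_units(rx_ticks),
	       hc_timeout_units(rx_ticks) ? 0 : 1);
	return 0;
}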
4894
4895static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4896 struct bnx2x_fastpath *fp, int last)
4897{
4898 int i;
4899
4900 for (i = 0; i < last; i++) {
4901 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4902 struct sk_buff *skb = rx_buf->skb;
4903
4904 if (skb == NULL) {
4905 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4906 continue;
4907 }
4908
4909 if (fp->tpa_state[i] == BNX2X_TPA_START)
4910 pci_unmap_single(bp->pdev,
4911 pci_unmap_addr(rx_buf, mapping),
356e2385 4912 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4913
4914 dev_kfree_skb(skb);
4915 rx_buf->skb = NULL;
4916 }
4917}
4918
4919static void bnx2x_init_rx_rings(struct bnx2x *bp)
4920{
7a9b2557 4921 int func = BP_FUNC(bp);
4922 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4923 ETH_MAX_AGGREGATION_QUEUES_E1H;
4924 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4925 int i, j;
a2fbb9ea 4926
87942b46 4927 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4928 DP(NETIF_MSG_IFUP,
4929 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4930
7a9b2557 4931 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4932
555f6c78 4933 for_each_rx_queue(bp, j) {
32626230 4934 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4935
32626230 4936 for (i = 0; i < max_agg_queues; i++) {
4937 fp->tpa_pool[i].skb =
4938 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4939 if (!fp->tpa_pool[i].skb) {
4940 BNX2X_ERR("Failed to allocate TPA "
4941 "skb pool for queue[%d] - "
4942 "disabling TPA on this "
4943 "queue!\n", j);
4944 bnx2x_free_tpa_pool(bp, fp, i);
4945 fp->disable_tpa = 1;
4946 break;
4947 }
4948 pci_unmap_addr_set((struct sw_rx_bd *)
4949 &bp->fp->tpa_pool[i],
4950 mapping, 0);
4951 fp->tpa_state[i] = BNX2X_TPA_STOP;
4952 }
4953 }
4954 }
4955
555f6c78 4956 for_each_rx_queue(bp, j) {
4957 struct bnx2x_fastpath *fp = &bp->fp[j];
4958
4959 fp->rx_bd_cons = 0;
4960 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4961 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4962
4963 /* Mark queue as Rx */
4964 fp->is_rx_queue = 1;
4965
4966 /* "next page" elements initialization */
4967 /* SGE ring */
4968 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4969 struct eth_rx_sge *sge;
4970
4971 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4972 sge->addr_hi =
4973 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4974 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4975 sge->addr_lo =
4976 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4977 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4978 }
4979
4980 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4981
7a9b2557 4982 /* RX BD ring */
4983 for (i = 1; i <= NUM_RX_RINGS; i++) {
4984 struct eth_rx_bd *rx_bd;
4985
4986 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4987 rx_bd->addr_hi =
4988 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4989 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4990 rx_bd->addr_lo =
4991 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4992 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4993 }
4994
34f80b04 4995 /* CQ ring */
a2fbb9ea
ET
4996 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4997 struct eth_rx_cqe_next_page *nextpg;
4998
4999 nextpg = (struct eth_rx_cqe_next_page *)
5000 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5001 nextpg->addr_hi =
5002 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5003 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5004 nextpg->addr_lo =
5005 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5006 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5007 }
5008
7a9b2557
VZ
5009 /* Allocate SGEs and initialize the ring elements */
5010 for (i = 0, ring_prod = 0;
5011 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5012
7a9b2557
VZ
5013 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5014 BNX2X_ERR("was only able to allocate "
5015 "%d rx sges\n", i);
5016 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5017 /* Cleanup already allocated elements */
5018 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5019 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5020 fp->disable_tpa = 1;
5021 ring_prod = 0;
5022 break;
5023 }
5024 ring_prod = NEXT_SGE_IDX(ring_prod);
5025 }
5026 fp->rx_sge_prod = ring_prod;
5027
5028 /* Allocate BDs and initialize BD ring */
66e855f3 5029 fp->rx_comp_cons = 0;
7a9b2557 5030 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5031 for (i = 0; i < bp->rx_ring_size; i++) {
5032 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5033 BNX2X_ERR("was only able to allocate "
de832a55
EG
5034 "%d rx skbs on queue[%d]\n", i, j);
5035 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5036 break;
5037 }
5038 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5039 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5040 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5041 }
5042
7a9b2557
VZ
5043 fp->rx_bd_prod = ring_prod;
5044 /* must not have more available CQEs than BDs */
5045 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5046 cqe_ring_prod);
a2fbb9ea
ET
5047 fp->rx_pkt = fp->rx_calls = 0;
5048
7a9b2557
VZ
5049 /* Warning!
5050 * this will generate an interrupt (to the TSTORM)
5051 * must only be done after chip is initialized
5052 */
5053 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5054 fp->rx_sge_prod);
a2fbb9ea
ET
5055 if (j != 0)
5056 continue;
5057
5058 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5059 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5060 U64_LO(fp->rx_comp_mapping));
5061 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5062 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5063 U64_HI(fp->rx_comp_mapping));
5064 }
5065}
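
/*
 * Each ring initialized above is a chain of BCM_PAGE_SIZE pages: the last
 * entries of page i (index CNT * i - 2 in the BD/SGE rings, CNT * i - 1 in
 * the CQ ring) carry no buffer and instead hold the DMA address of page
 * (i % NUM_..._RINGS), so the final page points back to page 0 and the
 * chip sees each ring as circular.
 */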

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
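
/*
 * The slowpath queue (SPQ) producer lives in XSTORM internal memory: the
 * writes above publish the SPQ base address and the initial producer
 * index.  bnx2x_sp_post() later advances spq_prod_bd/spq_prod_idx under
 * spq_lock and rewrites XSTORM_SPQ_PROD_OFFSET to notify the firmware;
 * spq_last_bd marks where the producer pointer wraps back to bp->spq.
 */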

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
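
/*
 * Each connection context is built in two halves: the ustorm section
 * describes the Rx side (BD/SGE page addresses, buffer sizes, TPA) and
 * the c/xstorm sections the Tx side (BD pages, Tx CQ status block index).
 * The CDU_RSRVD_VALUE_TYPE_A() words appear to be the context-validation
 * values the CDU checks against HW_CID(bp, i) on context fetch (inferred
 * from the CDU naming, not verified against documentation).
 */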

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
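
/*
 * The RSS indirection table is filled round-robin: entry i maps to client
 * ID (bp->fp->cl_id + i % num_rx_queues).  With e.g. 4 Rx queues the
 * TSTORM_INDIRECTION_TABLE_SIZE entries repeat the pattern
 * cl_id+0..cl_id+3, spreading the hash buckets evenly across Rx queues.
 */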

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
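
/*
 * Note: TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE above is not a typo in
 * this file - the misspelling comes from the firmware HSI headers and must
 * be used as-is.  The same goes for ETH_OVREHEAD, used earlier when sizing
 * Rx buffers.
 */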

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
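
/*
 * Rx filtering works at two levels: the NIG LLH mask written above decides
 * which packet classes the hardware forwards toward the host at all, while
 * the tstorm MAC filter config (drop_all/accept_all masks keyed by this
 * function's bit, 1 << BP_L_ID(bp)) decides what the clients accept.
 * Per-client MTU/VLAN settings are only pushed when some Rx mode is
 * active, hence the BNX2X_RX_MODE_NONE check.
 */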

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
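
/*
 * Worked example for max_agg_size above, assuming 4K pages and
 * PAGES_PER_SGE == 1 (check bnx2x.h for the actual values on a given
 * build): min(8, MAX_SKB_FRAGS) * 4096 * 1 = 32768, comfortably under
 * the 0xffff clamp.  The inner min() encodes the firmware limit of 8
 * SGE fragments per aggregated packet.
 */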

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
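
/*
 * The switch above deliberately falls through (the "no break" comments):
 * a COMMON load initializes common + port + function internal memory, a
 * PORT load initializes port + function, and a FUNCTION load only its own
 * function.  bnx2x_init_hw() later in this file uses the same cascade for
 * the hardware blocks.
 */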

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
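
/*
 * The header handling above follows RFC 1952: a gzip member starts with a
 * fixed 10-byte header (magic 0x1f 0x8b, CM = 8 i.e. Z_DEFLATED, FLG,
 * 4-byte MTIME, XFL, OS).  If FLG bit 3 (FNAME, 0x8) is set, a NUL
 * terminated file name follows and the while loop skips it.  Since the
 * header is consumed by hand, inflate is initialized with -MAX_WBITS,
 * zlib's convention for a raw deflate stream without a gzip/zlib wrapper.
 */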

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
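
/*
 * Each REG_WR_DMAE() above pushes 8 bytes of packet data (wb_write[0..1])
 * plus a control word into the NIG debug packet interface, 0x20 flagging
 * start-of-packet and 0x10 end-of-packet.  The two writes therefore emit
 * one 16-byte frame, which is why bnx2x_int_mem_test() below polls for
 * BRB byte counts in multiples of 0x10.
 */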

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
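
/*
 * Reading of part2 above (inferred from the register sequence, not from
 * documentation): with the parser's CFC search credit forced to 0, the ten
 * loopback frames back up inside the internal memories and only two reach
 * the PRS packet counter.  Granting a single credit then releases exactly
 * one more frame (the wait for a count of 3), proving the buffered path
 * works; the eleven EOP FIFO reads drain what the NIG captured.
 */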

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}
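
/*
 * The devctl decoding above follows the PCIe spec: PCI_EXP_DEVCTL_PAYLOAD
 * occupies bits 7:5 (max payload size = 128 << value) and
 * PCI_EXP_DEVCTL_READRQ bits 14:12 (max read request = 128 << value),
 * hence the >> 5 and >> 12 shifts.  bp->mrrs == -1 means "keep whatever
 * read request size the system already configured".
 */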

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

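	/*
	 * Worked example for the BRB pause thresholds above, for the branch
	 * where neither IS_E1HMF() nor ONE_PORT_FLAG applies and mtu > 4096:
	 * low is (24*1024 + mtu*4)/256 rounded up, i.e. 96 + mtu/64.  For a
	 * 9000-byte MTU that gives low = 96 + 140 + 1 = 237 and
	 * high = 237 + 56 = 293 (the +56 being 14KB in 256-byte units).
	 */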
	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1=valid bit is
   added at the 53rd bit;
   then, since this is a wide register(TM),
   we split it into two 32-bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
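
/*
 * Worked example: for addr = 0x0000001234567000,
 * ONCHIP_ADDR1(addr) = (addr >> 12) & 0xffffffff = 0x01234567 and
 * ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100000, so the
 * valid bit lands at bit 52 of the combined 64-bit ILT entry - the
 * "53rd bit" referred to in the comment above.
 */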

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
6696
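/* Allocation below mirrors bnx2x_free_mem(): any failed allocation jumps
 * to alloc_mem_err, which simply calls bnx2x_free_mem() - safe because
 * both FREE macros are NULL-checked before freeing.
 */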
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

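/* MSI-X vector layout: entry 0 serves the slowpath (default status block);
 * entries 1..BNX2X_NUM_QUEUES(bp) serve one fastpath queue each, the same
 * "offset = 1" convention used when requesting and freeing the IRQs.
 */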
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

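/* bp->intr_sem counts outstanding requests to keep interrupts disabled;
 * atomic_dec_and_test() in bnx2x_netif_start() returns true only when the
 * count reaches zero, i.e. when re-enabling NAPI and HW interrupts is safe.
 */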
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

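/* Note: the CAM stores a MAC address as three 16-bit words; the swab16()
 * calls below pack each two-byte chunk of dev_addr into the halfword byte
 * order the hardware expects.
 */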
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0  32-63:port1
	 * multicast 64-127:port0  128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

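/* Ramrod completion is signalled through bnx2x_sp_event(), which updates
 * the state variable that state_p points to; in poll mode the caller must
 * drive bnx2x_rx_int() by hand since interrupts are not serviced then.
 */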
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0,
			 * the reply for some commands will
			 * arrive on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  "  defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}
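/* Illustrative example: with the default module parameters (num_rx_queues
 * and num_tx_queues both 0) on an 8-CPU system, both counts start at
 * min(8, BNX2X_MAX_QUEUES(bp)); loading with num_tx_queues=4 instead would
 * yield 4 Tx queues, clamped to the Rx count had more been requested.
 */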

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if we failed to enable MSI-X due to
		   lack of memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
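	/* Note: without an MCP, load_count[0] tracks how many functions are
	 * loaded on the whole chip and load_count[1 + port] per port, so the
	 * first function up performs COMMON init, the first on its port does
	 * PORT init, and the rest FUNCTION init - mirroring the decision the
	 * MCP would normally make.
	 */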

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queues only need to be re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

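	/* Error unwind: each label below falls through to the next, undoing
	 * progressively earlier setup - load_error3 undoes MCP/HW state,
	 * load_error2 releases the IRQs and load_error1 disables NAPI and
	 * frees memory.
	 */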
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

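/* The "pretend" register makes subsequent GRC accesses from this function
 * execute as if issued by the function number written into it;
 * bnx2x_undi_int_disable_e1h() below pretends to be function 0, disables
 * interrupts E1-style, then restores the original setting.
 */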
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8359
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

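/*
 * Illustration (hypothetical shmem values, not part of the driver): the
 * station address is stored as two words, mac_upper holding bytes 0-1 in
 * its low 16 bits and mac_lower holding bytes 2-5. For
 * mac_upper = 0x00000011 and mac_lower = 0x22334455 the unpacking above
 * yields dev_addr = 00:11:22:33:44:55.
 */
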
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

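/*
 * Summary of the multi-function probe above: E1H multi-function (MF) mode
 * is inferred from function 0's outer-VLAN (E1HOV) tag in shmem. In MF
 * mode every function must then carry its own valid tag, and in
 * single-function mode a non-zero VN is inconsistent; both bad cases
 * abort the probe with -EPERM.
 */
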
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

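/*
 * Illustration (hypothetical value): in multi-function mode the speed
 * reported above is capped by the per-VN maximum bandwidth, which
 * mf_config encodes in units of 100 Mbps. A MAX_BW field of 25 therefore
 * caps cmd->speed at 25 * 100 = 2500 (2.5 Gbps) even when the physical
 * link runs at 10G.
 */
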
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

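/*
 * Worked example (hypothetical table counts): with one online register
 * block of size 3 and one wide-bus entry of size 2 with read_regs_count 4,
 * regdump_len = 3 + 2 * (1 + 4) = 13 dwords, i.e. 13 * 4 = 52 bytes, plus
 * sizeof(struct dump_hdr) for the header that bnx2x_get_regs() prepends.
 */
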
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

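/*
 * Note on the four helpers above: NVRAM access is a two-step handshake.
 * The SW_ARB register arbitrates the flash between the two ports (set the
 * REQ_SET bit, poll for the matching ARB bit, release with REQ_CLR),
 * while ACCESS_ENABLE gates the MCP NVRAM interface itself. Every
 * read/write path below brackets its work with
 * bnx2x_acquire_nvram_lock()/bnx2x_release_nvram_lock().
 */
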
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

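/*
 * Minimal usage sketch (hypothetical offset and size, error handling
 * shortened): callers hand bnx2x_nvram_read() a dword-aligned offset and
 * length; the loop above tags the leading dword with FIRST, the trailing
 * one with LAST, so the MCP can open and close the flash transaction:
 *
 *	u8 buf[64];
 *	int rc = bnx2x_nvram_read(bp, 0x100, buf, sizeof(buf));
 *	if (rc)
 *		return rc;
 */
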
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

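/*
 * Note on bnx2x_nvram_write1() (hypothetical offset for illustration):
 * the flash is dword-wide, so a single byte is written read-modify-write.
 * For offset 0x102, BYTE_OFFSET() gives 8 * (0x102 & 3) = 16, so bits
 * 23:16 of the dword image read from aligned offset 0x100 are cleared and
 * replaced by *data_buf before the dword is written back in one
 * FIRST | LAST transaction.
 */
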
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

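/*
 * Note on bnx2x_test_registers() (example values chosen for illustration):
 * each table entry is probed twice, once with 0x00000000 and once with
 * 0xffffffff. offset1 is the per-port stride (offset0 + port*offset1
 * selects the port-1 copy) and mask limits the comparison to implemented
 * bits, so writing 0xffffffff to a register masked 0x000003ff must read
 * back 0x3ff in the masked bits or the test fails.
 */
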
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

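/*
 * Note on bnx2x_run_loopback(): one self-addressed frame is built (DA set
 * to our own MAC, header padded with 0x77, payload filled with an index
 * pattern), posted as a start BD plus parsing BD pair, and the doorbell
 * rung. The test then polls the Tx and Rx status-block consumers for
 * exactly num_pkts completions and byte-compares the received payload;
 * any mismatch falls through to the exit labels with rc left at -ENODEV.
 */
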
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

10160#define CRC32_RESIDUAL 0xdebb20e3
10161
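/* 0xdebb20e3 is the CRC-32 residue: a block with its (inverted) CRC
 * appended always hashes to this constant, so each NVRAM region below
 * can be validated without knowing where inside it the CRC lives */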
10162static int bnx2x_test_nvram(struct bnx2x *bp)
10163{
10164 static const struct {
10165 int offset;
10166 int size;
10167 } nvram_tbl[] = {
10168 { 0, 0x14 }, /* bootstrap */
10169 { 0x14, 0xec }, /* dir */
10170 { 0x100, 0x350 }, /* manuf_info */
10171 { 0x450, 0xf0 }, /* feature_info */
10172 { 0x640, 0x64 }, /* upgrade_key_info */
10173 { 0x6a4, 0x64 },
10174 { 0x708, 0x70 }, /* manuf_key_info */
10175 { 0x778, 0x70 },
10176 { 0, 0 }
10177 };
4781bfad 10178 __be32 buf[0x350 / 4];
f3c87cdd
YG
10179 u8 *data = (u8 *)buf;
10180 int i, rc;
ab6ad5a4 10181 u32 magic, crc;
f3c87cdd
YG
10182
10183 rc = bnx2x_nvram_read(bp, 0, data, 4);
10184 if (rc) {
f5372251 10185 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10186 goto test_nvram_exit;
10187 }
10188
10189 magic = be32_to_cpu(buf[0]);
10190 if (magic != 0x669955aa) {
10191 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10192 rc = -ENODEV;
10193 goto test_nvram_exit;
10194 }
10195
10196 for (i = 0; nvram_tbl[i].size; i++) {
10197
10198 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10199 nvram_tbl[i].size);
10200 if (rc) {
10201 DP(NETIF_MSG_PROBE,
f5372251 10202 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10203 goto test_nvram_exit;
10204 }
10205
ab6ad5a4
EG
10206 crc = ether_crc_le(nvram_tbl[i].size, data);
10207 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10208 DP(NETIF_MSG_PROBE,
ab6ad5a4 10209 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10210 rc = -ENODEV;
10211 goto test_nvram_exit;
10212 }
10213 }
10214
10215test_nvram_exit:
10216 return rc;
10217}
10218
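/* interrupt test: post a benign SET_MAC ramrod and poll for up to
 * ~100ms; set_mac_pending is presumably cleared by the slow-path
 * completion handler once the interrupt actually fires */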
10219static int bnx2x_test_intr(struct bnx2x *bp)
10220{
10221 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10222 int i, rc;
10223
10224 if (!netif_running(bp->dev))
10225 return -ENODEV;
10226
8d9c5f34 10227 config->hdr.length = 0;
af246401
EG
10228 if (CHIP_IS_E1(bp))
10229 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10230 else
10231 config->hdr.offset = BP_FUNC(bp);
0626b899 10232 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10233 config->hdr.reserved1 = 0;
10234
10235 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10236 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10237 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10238 if (rc == 0) {
10239 bp->set_mac_pending++;
10240 for (i = 0; i < 10; i++) {
10241 if (!bp->set_mac_pending)
10242 break;
10243 msleep_interruptible(10);
10244 }
10245 if (i == 10)
10246 rc = -ENODEV;
10247 }
10248
10249 return rc;
10250}
10251
a2fbb9ea
ET
10252static void bnx2x_self_test(struct net_device *dev,
10253 struct ethtool_test *etest, u64 *buf)
10254{
10255 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10256
10257 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10258
f3c87cdd 10259 if (!netif_running(dev))
a2fbb9ea 10260 return;
a2fbb9ea 10261
33471629 10262 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10263 if (IS_E1HMF(bp))
10264 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10265
10266 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10267 int port = BP_PORT(bp);
10268 u32 val;
f3c87cdd
YG
10269 u8 link_up;
10270
279abdf5
EG
10271 /* save current value of input enable for TX port IF */
10272 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10273 /* disable input for TX port IF */
10274 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10275
f3c87cdd
YG
10276 link_up = bp->link_vars.link_up;
10277 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10278 bnx2x_nic_load(bp, LOAD_DIAG);
10279 /* wait until link state is restored */
10280 bnx2x_wait_for_link(bp, link_up);
10281
10282 if (bnx2x_test_registers(bp) != 0) {
10283 buf[0] = 1;
10284 etest->flags |= ETH_TEST_FL_FAILED;
10285 }
10286 if (bnx2x_test_memory(bp) != 0) {
10287 buf[1] = 1;
10288 etest->flags |= ETH_TEST_FL_FAILED;
10289 }
10290 buf[2] = bnx2x_test_loopback(bp, link_up);
10291 if (buf[2] != 0)
10292 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10293
f3c87cdd 10294 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10295
10296 /* restore input for TX port IF */
10297 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10298
f3c87cdd
YG
10299 bnx2x_nic_load(bp, LOAD_NORMAL);
10300 /* wait until link state is restored */
10301 bnx2x_wait_for_link(bp, link_up);
10302 }
10303 if (bnx2x_test_nvram(bp) != 0) {
10304 buf[3] = 1;
a2fbb9ea
ET
10305 etest->flags |= ETH_TEST_FL_FAILED;
10306 }
f3c87cdd
YG
10307 if (bnx2x_test_intr(bp) != 0) {
10308 buf[4] = 1;
10309 etest->flags |= ETH_TEST_FL_FAILED;
10310 }
10311 if (bp->port.pmf)
10312 if (bnx2x_link_test(bp) != 0) {
10313 buf[5] = 1;
10314 etest->flags |= ETH_TEST_FL_FAILED;
10315 }
f3c87cdd
YG
10316
10317#ifdef BNX2X_EXTRA_DEBUG
10318 bnx2x_panic_dump(bp);
10319#endif
a2fbb9ea
ET
10320}
10321
de832a55
EG
10322static const struct {
10323 long offset;
10324 int size;
10325 u8 string[ETH_GSTRING_LEN];
10326} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10327/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10328 { Q_STATS_OFFSET32(error_bytes_received_hi),
10329 8, "[%d]: rx_error_bytes" },
10330 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10331 8, "[%d]: rx_ucast_packets" },
10332 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10333 8, "[%d]: rx_mcast_packets" },
10334 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10335 8, "[%d]: rx_bcast_packets" },
10336 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10337 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10338 4, "[%d]: rx_phy_ip_err_discards"},
10339 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10340 4, "[%d]: rx_skb_alloc_discard" },
10341 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10342
10343/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10344 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10345 8, "[%d]: tx_packets" }
10346};
10347
bb2a0f7a
YG
10348static const struct {
10349 long offset;
10350 int size;
10351 u32 flags;
66e855f3
YG
10352#define STATS_FLAGS_PORT 1
10353#define STATS_FLAGS_FUNC 2
de832a55 10354#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10355 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10356} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10357/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10358 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10359 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10360 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10361 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10362 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10363 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10364 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10365 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10366 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10367 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10368 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10369 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10370 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10371 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10372 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10373 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10374 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10375/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10376 8, STATS_FLAGS_PORT, "rx_fragments" },
10377 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10378 8, STATS_FLAGS_PORT, "rx_jabbers" },
10379 { STATS_OFFSET32(no_buff_discard_hi),
10380 8, STATS_FLAGS_BOTH, "rx_discards" },
10381 { STATS_OFFSET32(mac_filter_discard),
10382 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10383 { STATS_OFFSET32(xxoverflow_discard),
10384 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10385 { STATS_OFFSET32(brb_drop_hi),
10386 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10387 { STATS_OFFSET32(brb_truncate_hi),
10388 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10389 { STATS_OFFSET32(pause_frames_received_hi),
10390 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10391 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10392 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10393 { STATS_OFFSET32(nig_timer_max),
10394 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10395/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10396 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10397 { STATS_OFFSET32(rx_skb_alloc_failed),
10398 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10399 { STATS_OFFSET32(hw_csum_err),
10400 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10401
10402 { STATS_OFFSET32(total_bytes_transmitted_hi),
10403 8, STATS_FLAGS_BOTH, "tx_bytes" },
10404 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10405 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10406 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10407 8, STATS_FLAGS_BOTH, "tx_packets" },
10408 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10409 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10410 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10411 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10412 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10413 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10414 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10415 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10416/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10417 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10418 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10419 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10420 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10421 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10422 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10423 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10424 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10425 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10426 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10427 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10428 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10429 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10430 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10431 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10432 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10433 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10434 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10435 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10436/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10437 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10438 { STATS_OFFSET32(pause_frames_sent_hi),
10439 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10440};
10441
de832a55
EG
10442#define IS_PORT_STAT(i) \
10443 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10444#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10445#define IS_E1HMF_MODE_STAT(bp) \
10446 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
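/* port counters are shared by both functions on a port, so in E1H
 * multi-function mode only per-function counters are exposed, unless
 * BNX2X_MSG_STATS in msglevel forces the full set */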
66e855f3 10447
15f0a394
BH
10448static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10449{
10450 struct bnx2x *bp = netdev_priv(dev);
10451 int i, num_stats;
10452
10453 switch (stringset) {
10454 case ETH_SS_STATS:
10455 if (is_multi(bp)) {
10456 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10457 if (!IS_E1HMF_MODE_STAT(bp))
10458 num_stats += BNX2X_NUM_STATS;
10459 } else {
10460 if (IS_E1HMF_MODE_STAT(bp)) {
10461 num_stats = 0;
10462 for (i = 0; i < BNX2X_NUM_STATS; i++)
10463 if (IS_FUNC_STAT(i))
10464 num_stats++;
10465 } else
10466 num_stats = BNX2X_NUM_STATS;
10467 }
10468 return num_stats;
10469
10470 case ETH_SS_TEST:
10471 return BNX2X_NUM_TESTS;
10472
10473 default:
10474 return -EINVAL;
10475 }
10476}
10477
a2fbb9ea
ET
10478static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10479{
bb2a0f7a 10480 struct bnx2x *bp = netdev_priv(dev);
de832a55 10481 int i, j, k;
bb2a0f7a 10482
a2fbb9ea
ET
10483 switch (stringset) {
10484 case ETH_SS_STATS:
de832a55
EG
10485 if (is_multi(bp)) {
10486 k = 0;
ca00392c 10487 for_each_rx_queue(bp, i) {
de832a55
EG
10488 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10489 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10490 bnx2x_q_stats_arr[j].string, i);
10491 k += BNX2X_NUM_Q_STATS;
10492 }
10493 if (IS_E1HMF_MODE_STAT(bp))
10494 break;
10495 for (j = 0; j < BNX2X_NUM_STATS; j++)
10496 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10497 bnx2x_stats_arr[j].string);
10498 } else {
10499 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10500 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10501 continue;
10502 strcpy(buf + j*ETH_GSTRING_LEN,
10503 bnx2x_stats_arr[i].string);
10504 j++;
10505 }
bb2a0f7a 10506 }
a2fbb9ea
ET
10507 break;
10508
10509 case ETH_SS_TEST:
10510 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10511 break;
10512 }
10513}
10514
a2fbb9ea
ET
10515static void bnx2x_get_ethtool_stats(struct net_device *dev,
10516 struct ethtool_stats *stats, u64 *buf)
10517{
10518 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10519 u32 *hw_stats, *offset;
10520 int i, j, k;
bb2a0f7a 10521
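 /* counters live as u32 pairs: size 8 entries are folded with
  * HILO_U64(hi, lo), size 4 entries are zero-extended and size 0
  * entries are placeholders that always read 0 */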
de832a55
EG
10522 if (is_multi(bp)) {
10523 k = 0;
ca00392c 10524 for_each_rx_queue(bp, i) {
de832a55
EG
10525 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10526 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10527 if (bnx2x_q_stats_arr[j].size == 0) {
10528 /* skip this counter */
10529 buf[k + j] = 0;
10530 continue;
10531 }
10532 offset = (hw_stats +
10533 bnx2x_q_stats_arr[j].offset);
10534 if (bnx2x_q_stats_arr[j].size == 4) {
10535 /* 4-byte counter */
10536 buf[k + j] = (u64) *offset;
10537 continue;
10538 }
10539 /* 8-byte counter */
10540 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10541 }
10542 k += BNX2X_NUM_Q_STATS;
10543 }
10544 if (IS_E1HMF_MODE_STAT(bp))
10545 return;
10546 hw_stats = (u32 *)&bp->eth_stats;
10547 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10548 if (bnx2x_stats_arr[j].size == 0) {
10549 /* skip this counter */
10550 buf[k + j] = 0;
10551 continue;
10552 }
10553 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10554 if (bnx2x_stats_arr[j].size == 4) {
10555 /* 4-byte counter */
10556 buf[k + j] = (u64) *offset;
10557 continue;
10558 }
10559 /* 8-byte counter */
10560 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10561 }
de832a55
EG
10562 } else {
10563 hw_stats = (u32 *)&bp->eth_stats;
10564 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10565 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10566 continue;
10567 if (bnx2x_stats_arr[i].size == 0) {
10568 /* skip this counter */
10569 buf[j] = 0;
10570 j++;
10571 continue;
10572 }
10573 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10574 if (bnx2x_stats_arr[i].size == 4) {
10575 /* 4-byte counter */
10576 buf[j] = (u64) *offset;
10577 j++;
10578 continue;
10579 }
10580 /* 8-byte counter */
10581 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10582 j++;
a2fbb9ea 10583 }
a2fbb9ea
ET
10584 }
10585}
10586
10587static int bnx2x_phys_id(struct net_device *dev, u32 data)
10588{
10589 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10590 int port = BP_PORT(bp);
a2fbb9ea
ET
10591 int i;
10592
34f80b04
EG
10593 if (!netif_running(dev))
10594 return 0;
10595
10596 if (!bp->port.pmf)
10597 return 0;
10598
a2fbb9ea
ET
10599 if (data == 0)
10600 data = 2;
10601
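 /* blink for 'data' seconds (default 2): each iteration toggles the
  * LED every 500ms, then the real link state is restored below */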
10602 for (i = 0; i < (data * 2); i++) {
c18487ee 10603 if ((i % 2) == 0)
34f80b04 10604 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10605 bp->link_params.hw_led_mode,
10606 bp->link_params.chip_id);
10607 else
34f80b04 10608 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10609 bp->link_params.hw_led_mode,
10610 bp->link_params.chip_id);
10611
a2fbb9ea
ET
10612 msleep_interruptible(500);
10613 if (signal_pending(current))
10614 break;
10615 }
10616
c18487ee 10617 if (bp->link_vars.link_up)
34f80b04 10618 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10619 bp->link_vars.line_speed,
10620 bp->link_params.hw_led_mode,
10621 bp->link_params.chip_id);
a2fbb9ea
ET
10622
10623 return 0;
10624}
10625
0fc0b732 10626static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10627 .get_settings = bnx2x_get_settings,
10628 .set_settings = bnx2x_set_settings,
10629 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10630 .get_regs_len = bnx2x_get_regs_len,
10631 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10632 .get_wol = bnx2x_get_wol,
10633 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10634 .get_msglevel = bnx2x_get_msglevel,
10635 .set_msglevel = bnx2x_set_msglevel,
10636 .nway_reset = bnx2x_nway_reset,
01e53298 10637 .get_link = bnx2x_get_link,
7a9b2557
VZ
10638 .get_eeprom_len = bnx2x_get_eeprom_len,
10639 .get_eeprom = bnx2x_get_eeprom,
10640 .set_eeprom = bnx2x_set_eeprom,
10641 .get_coalesce = bnx2x_get_coalesce,
10642 .set_coalesce = bnx2x_set_coalesce,
10643 .get_ringparam = bnx2x_get_ringparam,
10644 .set_ringparam = bnx2x_set_ringparam,
10645 .get_pauseparam = bnx2x_get_pauseparam,
10646 .set_pauseparam = bnx2x_set_pauseparam,
10647 .get_rx_csum = bnx2x_get_rx_csum,
10648 .set_rx_csum = bnx2x_set_rx_csum,
10649 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10650 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10651 .set_flags = bnx2x_set_flags,
10652 .get_flags = ethtool_op_get_flags,
10653 .get_sg = ethtool_op_get_sg,
10654 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10655 .get_tso = ethtool_op_get_tso,
10656 .set_tso = bnx2x_set_tso,
7a9b2557 10657 .self_test = bnx2x_self_test,
15f0a394 10658 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10659 .get_strings = bnx2x_get_strings,
a2fbb9ea 10660 .phys_id = bnx2x_phys_id,
bb2a0f7a 10661 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10662};
10663
10664/* end of ethtool_ops */
10665
10666/****************************************************************************
10667* General service functions
10668****************************************************************************/
10669
10670static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10671{
10672 u16 pmcsr;
10673
10674 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10675
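 /* read-modify-write the PMCSR power-state field; D0 entry also
  * clears any pending PME status, while D3hot (pmcsr |= 3) optionally
  * arms PME so WoL can wake the chip */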
10676 switch (state) {
10677 case PCI_D0:
34f80b04 10678 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10679 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10680 PCI_PM_CTRL_PME_STATUS));
10681
10682 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10683 /* delay required during transition out of D3hot */
a2fbb9ea 10684 msleep(20);
34f80b04 10685 break;
a2fbb9ea 10686
34f80b04
EG
10687 case PCI_D3hot:
10688 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10689 pmcsr |= 3;
a2fbb9ea 10690
34f80b04
EG
10691 if (bp->wol)
10692 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10693
34f80b04
EG
10694 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10695 pmcsr);
a2fbb9ea 10696
34f80b04
EG
10697 /* No more memory access after this point until
10698 * the device is brought back to D0.
10699 */
10700 break;
10701
10702 default:
10703 return -EINVAL;
10704 }
10705 return 0;
a2fbb9ea
ET
10706}
10707
237907c1
EG
10708static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10709{
10710 u16 rx_cons_sb;
10711
10712 /* Tell compiler that status block fields can change */
10713 barrier();
10714 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
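 /* the last RCQ entry on each page is a "next page" link rather than
  * a real completion, so step over it before comparing consumers */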
10715 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10716 rx_cons_sb++;
10717 return (fp->rx_comp_cons != rx_cons_sb);
10718}
10719
34f80b04
EG
10720/*
10721 * net_device service functions
10722 */
10723
a2fbb9ea
ET
10724static int bnx2x_poll(struct napi_struct *napi, int budget)
10725{
10726 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10727 napi);
10728 struct bnx2x *bp = fp->bp;
10729 int work_done = 0;
10730
10731#ifdef BNX2X_STOP_ON_ERROR
10732 if (unlikely(bp->panic))
34f80b04 10733 goto poll_panic;
a2fbb9ea
ET
10734#endif
10735
a2fbb9ea
ET
10736 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10737 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10738
10739 bnx2x_update_fpsb_idx(fp);
10740
8534f32c 10741 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10742 work_done = bnx2x_rx_int(fp, budget);
356e2385 10743
8534f32c
EG
10744 /* must not complete if we consumed full budget */
10745 if (work_done >= budget)
10746 goto poll_again;
10747 }
a2fbb9ea 10748
ca00392c 10749 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 10750 * ensure that status block indices have been actually read
ca00392c 10751 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 10752 * so that we won't write the "newer" value of the status block to IGU
ca00392c 10753 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c
EG
10754 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10755 * may be postponed to right before bnx2x_ack_sb). In this case
10756 * there will never be another interrupt until there is another update
10757 * of the status block, while there is still unhandled work.
10758 */
10759 rmb();
a2fbb9ea 10760
ca00392c 10761 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10762#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10763poll_panic:
a2fbb9ea 10764#endif
288379f0 10765 napi_complete(napi);
a2fbb9ea 10766
0626b899 10767 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10768 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10769 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10770 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10771 }
356e2385 10772
8534f32c 10773poll_again:
a2fbb9ea
ET
10774 return work_done;
10775}
10776
755735eb
EG
10777
10778/* we split the first BD into header and data BDs
33471629 10779 * to ease the pain of our fellow microcode engineers;
755735eb
EG
10780 * we use one mapping for both BDs.
10781 * So far this has only been observed to happen
10782 * in Other Operating Systems(TM)
10783 */
10784static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10785 struct bnx2x_fastpath *fp,
ca00392c
EG
10786 struct sw_tx_bd *tx_buf,
10787 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10788 u16 bd_prod, int nbd)
10789{
ca00392c 10790 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10791 struct eth_tx_bd *d_tx_bd;
10792 dma_addr_t mapping;
10793 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10794
10795 /* first fix first BD */
10796 h_tx_bd->nbd = cpu_to_le16(nbd);
10797 h_tx_bd->nbytes = cpu_to_le16(hlen);
10798
10799 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10800 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10801 h_tx_bd->addr_lo, h_tx_bd->nbd);
10802
10803 /* now get a new data BD
10804 * (after the pbd) and fill it */
10805 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10806 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
10807
10808 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10809 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10810
10811 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10812 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10813 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
10814
10815 /* this marks the BD as one that has no individual mapping */
10816 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10817
755735eb
EG
10818 DP(NETIF_MSG_TX_QUEUED,
10819 "TSO split data size is %d (%x:%x)\n",
10820 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10821
ca00392c
EG
10822 /* update tx_bd */
10823 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
10824
10825 return bd_prod;
10826}
10827
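/* fold the checksum of the bytes between the stack's csum start and the
 * transport header into (or out of) the partial csum (fix is the signed
 * distance between the two), then byte-swap for the chip */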
10828static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10829{
10830 if (fix > 0)
10831 csum = (u16) ~csum_fold(csum_sub(csum,
10832 csum_partial(t_header - fix, fix, 0)));
10833
10834 else if (fix < 0)
10835 csum = (u16) ~csum_fold(csum_add(csum,
10836 csum_partial(t_header, -fix, 0)));
10837
10838 return swab16(csum);
10839}
10840
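/* classify the skb into XMIT_* flags (plain, v4/v6 csum offload, TCP
 * csum, GSO v4/v6); the xmit path keys its BD flags and parsing BD
 * setup off this value */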
10841static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10842{
10843 u32 rc;
10844
10845 if (skb->ip_summed != CHECKSUM_PARTIAL)
10846 rc = XMIT_PLAIN;
10847
10848 else {
4781bfad 10849 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10850 rc = XMIT_CSUM_V6;
10851 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10852 rc |= XMIT_CSUM_TCP;
10853
10854 } else {
10855 rc = XMIT_CSUM_V4;
10856 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10857 rc |= XMIT_CSUM_TCP;
10858 }
10859 }
10860
10861 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10862 rc |= XMIT_GSO_V4;
10863
10864 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10865 rc |= XMIT_GSO_V6;
10866
10867 return rc;
10868}
10869
632da4d6 10870#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10871/* check if packet requires linearization (packet is too fragmented)
10872 no need to check fragmentation if page size > 8K (there will be no
10873 violation of FW restrictions) */
755735eb
EG
10874static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10875 u32 xmit_type)
10876{
10877 int to_copy = 0;
10878 int hlen = 0;
10879 int first_bd_sz = 0;
10880
10881 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10882 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10883
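 /* the FW restriction being checked, roughly: in any window of
  * wnd_size consecutive BDs the chip must see at least one full MSS
  * of payload, otherwise the skb has to be linearized */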
10884 if (xmit_type & XMIT_GSO) {
10885 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10886 /* Check if LSO packet needs to be copied:
10887 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10888 int wnd_size = MAX_FETCH_BD - 3;
33471629 10889 /* Number of windows to check */
755735eb
EG
10890 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10891 int wnd_idx = 0;
10892 int frag_idx = 0;
10893 u32 wnd_sum = 0;
10894
10895 /* Headers length */
10896 hlen = (int)(skb_transport_header(skb) - skb->data) +
10897 tcp_hdrlen(skb);
10898
10899 /* Amount of data (w/o headers) on the linear part of the SKB */
10900 first_bd_sz = skb_headlen(skb) - hlen;
10901
10902 wnd_sum = first_bd_sz;
10903
10904 /* Calculate the first sum - it's special */
10905 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10906 wnd_sum +=
10907 skb_shinfo(skb)->frags[frag_idx].size;
10908
10909 /* If there was data on linear skb data - check it */
10910 if (first_bd_sz > 0) {
10911 if (unlikely(wnd_sum < lso_mss)) {
10912 to_copy = 1;
10913 goto exit_lbl;
10914 }
10915
10916 wnd_sum -= first_bd_sz;
10917 }
10918
10919 /* Others are easier: run through the frag list and
10920 check all windows */
10921 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10922 wnd_sum +=
10923 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10924
10925 if (unlikely(wnd_sum < lso_mss)) {
10926 to_copy = 1;
10927 break;
10928 }
10929 wnd_sum -=
10930 skb_shinfo(skb)->frags[wnd_idx].size;
10931 }
755735eb
EG
10932 } else {
10933 /* a non-LSO packet that is too fragmented must always
10934 be linearized */
10935 to_copy = 1;
10936 }
10937 }
10938
10939exit_lbl:
10940 if (unlikely(to_copy))
10941 DP(NETIF_MSG_TX_QUEUED,
10942 "Linearization IS REQUIRED for %s packet. "
10943 "num_frags %d hlen %d first_bd_sz %d\n",
10944 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10945 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10946
10947 return to_copy;
10948}
632da4d6 10949#endif
755735eb
EG
10950
10951/* called with netif_tx_lock
a2fbb9ea 10952 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10953 * netif_wake_queue()
a2fbb9ea 10954 */
61357325 10955static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
10956{
10957 struct bnx2x *bp = netdev_priv(dev);
ca00392c 10958 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 10959 struct netdev_queue *txq;
a2fbb9ea 10960 struct sw_tx_bd *tx_buf;
ca00392c
EG
10961 struct eth_tx_start_bd *tx_start_bd;
10962 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
10963 struct eth_tx_parse_bd *pbd = NULL;
10964 u16 pkt_prod, bd_prod;
755735eb 10965 int nbd, fp_index;
a2fbb9ea 10966 dma_addr_t mapping;
755735eb 10967 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
10968 int i;
10969 u8 hlen = 0;
ca00392c 10970 __le16 pkt_size = 0;
a2fbb9ea
ET
10971
10972#ifdef BNX2X_STOP_ON_ERROR
10973 if (unlikely(bp->panic))
10974 return NETDEV_TX_BUSY;
10975#endif
10976
555f6c78
EG
10977 fp_index = skb_get_queue_mapping(skb);
10978 txq = netdev_get_tx_queue(dev, fp_index);
10979
ca00392c
EG
10980 fp = &bp->fp[fp_index + bp->num_rx_queues];
10981 fp_stat = &bp->fp[fp_index];
755735eb 10982
231fd58a 10983 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 10984 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 10985 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10986 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10987 return NETDEV_TX_BUSY;
10988 }
10989
755735eb
EG
10990 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10991 " gso type %x xmit_type %x\n",
10992 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10993 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10994
632da4d6 10995#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10996 /* First, check if we need to linearize the skb (due to FW
10997 restrictions). No need to check fragmentation if page size > 8K
10998 (there will be no violation of FW restrictions) */
755735eb
EG
10999 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11000 /* Statistics of linearization */
11001 bp->lin_cnt++;
11002 if (skb_linearize(skb) != 0) {
11003 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11004 "silently dropping this SKB\n");
11005 dev_kfree_skb_any(skb);
da5a662a 11006 return NETDEV_TX_OK;
755735eb
EG
11007 }
11008 }
632da4d6 11009#endif
755735eb 11010
a2fbb9ea 11011 /*
755735eb 11012 Please read carefully. First we use one BD which we mark as start,
ca00392c 11013 then we have a parsing info BD (used for TSO or xsum),
755735eb 11014 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11015 (don't forget to mark the last one as last,
11016 and to unmap only AFTER you write to the BD ...)
755735eb 11017 And above all, all pbd sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11018 */
11019
11020 pkt_prod = fp->tx_pkt_prod++;
755735eb 11021 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11022
755735eb 11023 /* get a tx_buf and first BD */
a2fbb9ea 11024 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11025 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11026
ca00392c
EG
11027 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11028 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11029 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11030 /* header nbd */
ca00392c 11031 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11032
755735eb
EG
11033 /* remember the first BD of the packet */
11034 tx_buf->first_bd = fp->tx_bd_prod;
11035 tx_buf->skb = skb;
ca00392c 11036 tx_buf->flags = 0;
a2fbb9ea
ET
11037
11038 DP(NETIF_MSG_TX_QUEUED,
11039 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11040 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11041
0c6671b0
EG
11042#ifdef BCM_VLAN
11043 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11044 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11045 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11046 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11047 } else
0c6671b0 11048#endif
ca00392c 11049 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11050
ca00392c
EG
11051 /* turn on parsing and get a BD */
11052 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11053 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11054
ca00392c 11055 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11056
11057 if (xmit_type & XMIT_CSUM) {
ca00392c 11058 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11059
11060 /* for now NS flag is not used in Linux */
4781bfad
EG
11061 pbd->global_data =
11062 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11063 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11064
755735eb
EG
11065 pbd->ip_hlen = (skb_transport_header(skb) -
11066 skb_network_header(skb)) / 2;
11067
11068 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11069
755735eb 11070 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11071 hlen = hlen*2;
a2fbb9ea 11072
ca00392c 11073 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11074
11075 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11076 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11077 ETH_TX_BD_FLAGS_IP_CSUM;
11078 else
ca00392c
EG
11079 tx_start_bd->bd_flags.as_bitfield |=
11080 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11081
11082 if (xmit_type & XMIT_CSUM_TCP) {
11083 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11084
11085 } else {
11086 s8 fix = SKB_CS_OFF(skb); /* signed! */
11087
ca00392c 11088 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11089
755735eb 11090 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11091 "hlen %d fix %d csum before fix %x\n",
11092 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11093
11094 /* HW bug: fixup the CSUM */
11095 pbd->tcp_pseudo_csum =
11096 bnx2x_csum_fix(skb_transport_header(skb),
11097 SKB_CS(skb), fix);
11098
11099 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11100 pbd->tcp_pseudo_csum);
11101 }
a2fbb9ea
ET
11102 }
11103
11104 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11105 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11106
ca00392c
EG
11107 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11108 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11109 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11110 tx_start_bd->nbd = cpu_to_le16(nbd);
11111 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11112 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11113
11114 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11115 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11116 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11117 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11118 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11119
755735eb 11120 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11121
11122 DP(NETIF_MSG_TX_QUEUED,
11123 "TSO packet len %d hlen %d total len %d tso size %d\n",
11124 skb->len, hlen, skb_headlen(skb),
11125 skb_shinfo(skb)->gso_size);
11126
ca00392c 11127 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11128
755735eb 11129 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11130 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11131 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11132
11133 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11134 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11135 pbd->tcp_flags = pbd_tcp_flags(skb);
11136
11137 if (xmit_type & XMIT_GSO_V4) {
11138 pbd->ip_id = swab16(ip_hdr(skb)->id);
11139 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11140 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11141 ip_hdr(skb)->daddr,
11142 0, IPPROTO_TCP, 0));
755735eb
EG
11143
11144 } else
11145 pbd->tcp_pseudo_csum =
11146 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11147 &ipv6_hdr(skb)->daddr,
11148 0, IPPROTO_TCP, 0));
11149
a2fbb9ea
ET
11150 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11151 }
ca00392c 11152 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11153
755735eb
EG
11154 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11155 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11156
755735eb 11157 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11158 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11159 if (total_pkt_bd == NULL)
11160 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11161
755735eb
EG
11162 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11163 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11164
ca00392c
EG
11165 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11166 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11167 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11168 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11169
755735eb 11170 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11171 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11172 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11173 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11174 }
11175
ca00392c 11176 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11177
a2fbb9ea
ET
11178 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11179
755735eb 11180 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11181 * if the packet contains or ends with it
11182 */
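 /* (the "next BD" is the next-page link terminating each BD ring
  * page; TX_BD_POFF < nbd means this packet wrapped past one, so the
  * FW-visible BD count must include it) */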
11183 if (TX_BD_POFF(bd_prod) < nbd)
11184 nbd++;
11185
ca00392c
EG
11186 if (total_pkt_bd != NULL)
11187 total_pkt_bd->total_pkt_bytes = pkt_size;
11188
a2fbb9ea
ET
11189 if (pbd)
11190 DP(NETIF_MSG_TX_QUEUED,
11191 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11192 " tcp_flags %x xsum %x seq %u hlen %u\n",
11193 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11194 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11195 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11196
755735eb 11197 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11198
58f4c4cf
EG
11199 /*
11200 * Make sure that the BD data is updated before updating the producer
11201 * since FW might read the BD right after the producer is updated.
11202 * This is only applicable for weak-ordered memory model archs such
11203 * as IA-64. The following barrier is also mandatory since the FW
11204 * will assume packets always have BDs.
11205 */
11206 wmb();
11207
ca00392c
EG
11208 fp->tx_db.data.prod += nbd;
11209 barrier();
11210 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11211
11212 mmiowb();
11213
755735eb 11214 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11215
11216 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11217 netif_tx_stop_queue(txq);
58f4c4cf
EG
11218 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11219 if we put Tx into XOFF state. */
11220 smp_mb();
ca00392c 11221 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11222 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11223 netif_tx_wake_queue(txq);
a2fbb9ea 11224 }
ca00392c 11225 fp_stat->tx_pkt++;
a2fbb9ea
ET
11226
11227 return NETDEV_TX_OK;
11228}
11229
bb2a0f7a 11230/* called with rtnl_lock */
a2fbb9ea
ET
11231static int bnx2x_open(struct net_device *dev)
11232{
11233 struct bnx2x *bp = netdev_priv(dev);
11234
6eccabb3
EG
11235 netif_carrier_off(dev);
11236
a2fbb9ea
ET
11237 bnx2x_set_power_state(bp, PCI_D0);
11238
bb2a0f7a 11239 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11240}
11241
bb2a0f7a 11242/* called with rtnl_lock */
a2fbb9ea
ET
11243static int bnx2x_close(struct net_device *dev)
11244{
a2fbb9ea
ET
11245 struct bnx2x *bp = netdev_priv(dev);
11246
11247 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11248 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11249 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11250 if (!CHIP_REV_IS_SLOW(bp))
11251 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11252
11253 return 0;
11254}
11255
f5372251 11256/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11257static void bnx2x_set_rx_mode(struct net_device *dev)
11258{
11259 struct bnx2x *bp = netdev_priv(dev);
11260 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11261 int port = BP_PORT(bp);
11262
11263 if (bp->state != BNX2X_STATE_OPEN) {
11264 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11265 return;
11266 }
11267
11268 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11269
11270 if (dev->flags & IFF_PROMISC)
11271 rx_mode = BNX2X_RX_MODE_PROMISC;
11272
11273 else if ((dev->flags & IFF_ALLMULTI) ||
11274 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11275 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11276
11277 else { /* some multicasts */
11278 if (CHIP_IS_E1(bp)) {
11279 int i, old, offset;
11280 struct dev_mc_list *mclist;
11281 struct mac_configuration_cmd *config =
11282 bnx2x_sp(bp, mcast_config);
11283
11284 for (i = 0, mclist = dev->mc_list;
11285 mclist && (i < dev->mc_count);
11286 i++, mclist = mclist->next) {
11287
11288 config->config_table[i].
11289 cam_entry.msb_mac_addr =
11290 swab16(*(u16 *)&mclist->dmi_addr[0]);
11291 config->config_table[i].
11292 cam_entry.middle_mac_addr =
11293 swab16(*(u16 *)&mclist->dmi_addr[2]);
11294 config->config_table[i].
11295 cam_entry.lsb_mac_addr =
11296 swab16(*(u16 *)&mclist->dmi_addr[4]);
11297 config->config_table[i].cam_entry.flags =
11298 cpu_to_le16(port);
11299 config->config_table[i].
11300 target_table_entry.flags = 0;
ca00392c
EG
11301 config->config_table[i].target_table_entry.
11302 clients_bit_vector =
11303 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11304 config->config_table[i].
11305 target_table_entry.vlan_id = 0;
11306
11307 DP(NETIF_MSG_IFUP,
11308 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11309 config->config_table[i].
11310 cam_entry.msb_mac_addr,
11311 config->config_table[i].
11312 cam_entry.middle_mac_addr,
11313 config->config_table[i].
11314 cam_entry.lsb_mac_addr);
11315 }
8d9c5f34 11316 old = config->hdr.length;
34f80b04
EG
11317 if (old > i) {
11318 for (; i < old; i++) {
11319 if (CAM_IS_INVALID(config->
11320 config_table[i])) {
af246401 11321 /* already invalidated */
34f80b04
EG
11322 break;
11323 }
11324 /* invalidate */
11325 CAM_INVALIDATE(config->
11326 config_table[i]);
11327 }
11328 }
11329
11330 if (CHIP_REV_IS_SLOW(bp))
11331 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11332 else
11333 offset = BNX2X_MAX_MULTICAST*(1 + port);
11334
8d9c5f34 11335 config->hdr.length = i;
34f80b04 11336 config->hdr.offset = offset;
8d9c5f34 11337 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11338 config->hdr.reserved1 = 0;
11339
11340 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11341 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11342 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11343 0);
11344 } else { /* E1H */
11345 /* Accept one or more multicasts */
11346 struct dev_mc_list *mclist;
11347 u32 mc_filter[MC_HASH_SIZE];
11348 u32 crc, bit, regidx;
11349 int i;
11350
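 /* E1H approximates multicast filtering with a 256-bit hash: the
  * top byte of each address's crc32c selects one bit spread across
  * the MC_HASH_SIZE NIG registers written below */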
11351 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11352
11353 for (i = 0, mclist = dev->mc_list;
11354 mclist && (i < dev->mc_count);
11355 i++, mclist = mclist->next) {
11356
7c510e4b
JB
11357 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11358 mclist->dmi_addr);
34f80b04
EG
11359
11360 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11361 bit = (crc >> 24) & 0xff;
11362 regidx = bit >> 5;
11363 bit &= 0x1f;
11364 mc_filter[regidx] |= (1 << bit);
11365 }
11366
11367 for (i = 0; i < MC_HASH_SIZE; i++)
11368 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11369 mc_filter[i]);
11370 }
11371 }
11372
11373 bp->rx_mode = rx_mode;
11374 bnx2x_set_storm_rx_mode(bp);
11375}
11376
11377/* called with rtnl_lock */
a2fbb9ea
ET
11378static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11379{
11380 struct sockaddr *addr = p;
11381 struct bnx2x *bp = netdev_priv(dev);
11382
34f80b04 11383 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11384 return -EINVAL;
11385
11386 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11387 if (netif_running(dev)) {
11388 if (CHIP_IS_E1(bp))
3101c2bc 11389 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 11390 else
3101c2bc 11391 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 11392 }
a2fbb9ea
ET
11393
11394 return 0;
11395}
11396
c18487ee 11397/* called with rtnl_lock */
01cd4528
EG
11398static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11399 int devad, u16 addr)
a2fbb9ea 11400{
01cd4528
EG
11401 struct bnx2x *bp = netdev_priv(netdev);
11402 u16 value;
11403 int rc;
11404 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11405
01cd4528
EG
11406 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11407 prtad, devad, addr);
a2fbb9ea 11408
01cd4528
EG
11409 if (prtad != bp->mdio.prtad) {
11410 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11411 prtad, bp->mdio.prtad);
11412 return -EINVAL;
11413 }
11414
11415 /* The HW expects different devad if CL22 is used */
11416 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11417
01cd4528
EG
11418 bnx2x_acquire_phy_lock(bp);
11419 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11420 devad, addr, &value);
11421 bnx2x_release_phy_lock(bp);
11422 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11423
01cd4528
EG
11424 if (!rc)
11425 rc = value;
11426 return rc;
11427}
a2fbb9ea 11428
01cd4528
EG
11429/* called with rtnl_lock */
11430static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11431 u16 addr, u16 value)
11432{
11433 struct bnx2x *bp = netdev_priv(netdev);
11434 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11435 int rc;
11436
11437 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11438 " value 0x%x\n", prtad, devad, addr, value);
11439
11440 if (prtad != bp->mdio.prtad) {
11441 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11442 prtad, bp->mdio.prtad);
11443 return -EINVAL;
a2fbb9ea
ET
11444 }
11445
01cd4528
EG
11446 /* The HW expects different devad if CL22 is used */
11447 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11448
01cd4528
EG
11449 bnx2x_acquire_phy_lock(bp);
11450 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11451 devad, addr, value);
11452 bnx2x_release_phy_lock(bp);
11453 return rc;
11454}
c18487ee 11455
01cd4528
EG
11456/* called with rtnl_lock */
11457static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11458{
11459 struct bnx2x *bp = netdev_priv(dev);
11460 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11461
01cd4528
EG
11462 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11463 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11464
01cd4528
EG
11465 if (!netif_running(dev))
11466 return -EAGAIN;
11467
11468 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11469}
11470
34f80b04 11471/* called with rtnl_lock */
a2fbb9ea
ET
11472static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11473{
11474 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11475 int rc = 0;
a2fbb9ea
ET
11476
11477 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11478 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11479 return -EINVAL;
11480
11481 /* This does not race with packet allocation
c14423fe 11482 * because the actual alloc size is
a2fbb9ea
ET
11483 * only updated as part of load
11484 */
11485 dev->mtu = new_mtu;
11486
11487 if (netif_running(dev)) {
34f80b04
EG
11488 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11489 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11490 }
34f80b04
EG
11491
11492 return rc;
a2fbb9ea
ET
11493}
11494
11495static void bnx2x_tx_timeout(struct net_device *dev)
11496{
11497 struct bnx2x *bp = netdev_priv(dev);
11498
11499#ifdef BNX2X_STOP_ON_ERROR
11500 if (!bp->panic)
11501 bnx2x_panic();
11502#endif
11503 /* This allows the netif to be shut down gracefully before resetting */
11504 schedule_work(&bp->reset_task);
11505}
11506
11507#ifdef BCM_VLAN
34f80b04 11508/* called with rtnl_lock */
a2fbb9ea
ET
11509static void bnx2x_vlan_rx_register(struct net_device *dev,
11510 struct vlan_group *vlgrp)
11511{
11512 struct bnx2x *bp = netdev_priv(dev);
11513
11514 bp->vlgrp = vlgrp;
0c6671b0
EG
11515
11516 /* Set flags according to the required capabilities */
11517 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11518
11519 if (dev->features & NETIF_F_HW_VLAN_TX)
11520 bp->flags |= HW_VLAN_TX_FLAG;
11521
11522 if (dev->features & NETIF_F_HW_VLAN_RX)
11523 bp->flags |= HW_VLAN_RX_FLAG;
11524
a2fbb9ea 11525 if (netif_running(dev))
49d66772 11526 bnx2x_set_client_config(bp);
a2fbb9ea 11527}
34f80b04 11528
a2fbb9ea
ET
11529#endif
11530
11531#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11532static void poll_bnx2x(struct net_device *dev)
11533{
11534 struct bnx2x *bp = netdev_priv(dev);
11535
11536 disable_irq(bp->pdev->irq);
11537 bnx2x_interrupt(bp->pdev->irq, dev);
11538 enable_irq(bp->pdev->irq);
11539}
11540#endif
11541
c64213cd
SH
11542static const struct net_device_ops bnx2x_netdev_ops = {
11543 .ndo_open = bnx2x_open,
11544 .ndo_stop = bnx2x_close,
11545 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11546 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11547 .ndo_set_mac_address = bnx2x_change_mac_addr,
11548 .ndo_validate_addr = eth_validate_addr,
11549 .ndo_do_ioctl = bnx2x_ioctl,
11550 .ndo_change_mtu = bnx2x_change_mtu,
11551 .ndo_tx_timeout = bnx2x_tx_timeout,
11552#ifdef BCM_VLAN
11553 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11554#endif
11555#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11556 .ndo_poll_controller = poll_bnx2x,
11557#endif
11558};
11559
34f80b04
EG
11560static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11561 struct net_device *dev)
a2fbb9ea
ET
11562{
11563 struct bnx2x *bp;
11564 int rc;
11565
11566 SET_NETDEV_DEV(dev, &pdev->dev);
11567 bp = netdev_priv(dev);
11568
34f80b04
EG
11569 bp->dev = dev;
11570 bp->pdev = pdev;
a2fbb9ea 11571 bp->flags = 0;
34f80b04 11572 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11573
11574 rc = pci_enable_device(pdev);
11575 if (rc) {
11576 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11577 goto err_out;
11578 }
11579
11580 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11581 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11582 " aborting\n");
11583 rc = -ENODEV;
11584 goto err_out_disable;
11585 }
11586
11587 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11588 printk(KERN_ERR PFX "Cannot find second PCI device"
11589 " base address, aborting\n");
11590 rc = -ENODEV;
11591 goto err_out_disable;
11592 }
11593
34f80b04
EG
11594 if (atomic_read(&pdev->enable_cnt) == 1) {
11595 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11596 if (rc) {
11597 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11598 " aborting\n");
11599 goto err_out_disable;
11600 }
a2fbb9ea 11601
34f80b04
EG
11602 pci_set_master(pdev);
11603 pci_save_state(pdev);
11604 }
a2fbb9ea
ET
11605
11606 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11607 if (bp->pm_cap == 0) {
11608 printk(KERN_ERR PFX "Cannot find power management"
11609 " capability, aborting\n");
11610 rc = -EIO;
11611 goto err_out_release;
11612 }
11613
11614 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11615 if (bp->pcie_cap == 0) {
11616 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11617 " aborting\n");
11618 rc = -EIO;
11619 goto err_out_release;
11620 }
11621
6a35528a 11622 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11623 bp->flags |= USING_DAC_FLAG;
6a35528a 11624 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11625 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11626 " failed, aborting\n");
11627 rc = -EIO;
11628 goto err_out_release;
11629 }
11630
284901a9 11631 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11632 printk(KERN_ERR PFX "System does not support DMA,"
11633 " aborting\n");
11634 rc = -EIO;
11635 goto err_out_release;
11636 }
11637
34f80b04
EG
11638 dev->mem_start = pci_resource_start(pdev, 0);
11639 dev->base_addr = dev->mem_start;
11640 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11641
11642 dev->irq = pdev->irq;
11643
275f165f 11644 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11645 if (!bp->regview) {
11646 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11647 rc = -ENOMEM;
11648 goto err_out_release;
11649 }
11650
34f80b04
EG
11651 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11652 min_t(u64, BNX2X_DB_SIZE,
11653 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11654 if (!bp->doorbells) {
11655 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11656 rc = -ENOMEM;
11657 goto err_out_unmap;
11658 }
11659
11660 bnx2x_set_power_state(bp, PCI_D0);
11661
34f80b04
EG
11662 /* clean indirect addresses */
11663 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11664 PCICFG_VENDOR_ID_OFFSET);
11665 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11666 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11667 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11668 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11669
34f80b04 11670 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11671
c64213cd 11672 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11673 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11674 dev->features |= NETIF_F_SG;
11675 dev->features |= NETIF_F_HW_CSUM;
11676 if (bp->flags & USING_DAC_FLAG)
11677 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11678 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11679 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11680#ifdef BCM_VLAN
11681 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11682 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11683
11684 dev->vlan_features |= NETIF_F_SG;
11685 dev->vlan_features |= NETIF_F_HW_CSUM;
11686 if (bp->flags & USING_DAC_FLAG)
11687 dev->vlan_features |= NETIF_F_HIGHDMA;
11688 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11689 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11690#endif
a2fbb9ea 11691
01cd4528
EG
11692 /* get_port_hwinfo() will set prtad and mmds properly */
11693 bp->mdio.prtad = MDIO_PRTAD_NONE;
11694 bp->mdio.mmds = 0;
11695 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11696 bp->mdio.dev = dev;
11697 bp->mdio.mdio_read = bnx2x_mdio_read;
11698 bp->mdio.mdio_write = bnx2x_mdio_write;
11699
a2fbb9ea
ET
11700 return 0;
11701
11702err_out_unmap:
11703 if (bp->regview) {
11704 iounmap(bp->regview);
11705 bp->regview = NULL;
11706 }
a2fbb9ea
ET
11707 if (bp->doorbells) {
11708 iounmap(bp->doorbells);
11709 bp->doorbells = NULL;
11710 }
11711
11712err_out_release:
34f80b04
EG
11713 if (atomic_read(&pdev->enable_cnt) == 1)
11714 pci_release_regions(pdev);
a2fbb9ea
ET
11715
11716err_out_disable:
11717 pci_disable_device(pdev);
11718 pci_set_drvdata(pdev, NULL);
11719
11720err_out:
11721 return rc;
11722}
11723
37f9ce62
EG
11724static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11725 int *width, int *speed)
25047950
ET
11726{
11727 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11728
37f9ce62 11729 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11730
37f9ce62
EG
11731 /* return value of 1=2.5GHz 2=5GHz */
11732 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11733}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version: %d.%d.%d.%d."
				    " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
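
/* A sketch of the firmware file layout these checks imply (the
 * authoritative definition is struct bnx2x_fw_file_hdr in
 * bnx2x_fw_file_hdr.h): the file begins with a header that is an array
 * of section descriptors, each carrying a big-endian offset and len of
 * a payload within the file. Among the sections, fw_version holds the
 * four version bytes compared above, and init_ops_offsets is an array
 * of big-endian u16 indices into the init_ops array.
 */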

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
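
/* Worked example with illustrative values: if the two decoded
 * big-endian words of an entry are 0x1A001234 and 0x00000001, then
 * op = 0x1A, offset = 0x001234 and raw_data = 0x00000001. n is a byte
 * count, so every 8-byte entry produces one struct raw_op.
 */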

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
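
/* For all three conversion helpers above, n is the section length in
 * bytes: be32_to_cpu_n() swaps n/4 words, be16_to_cpu_n() swaps n/2
 * halfwords and bnx2x_prep_ops() consumes n/8 op entries. Any trailing
 * remainder bytes are silently ignored, which assumes the sections in
 * the firmware file are properly sized.
 */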

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
					    "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
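
/* A rough expansion of BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n), with the error printk omitted:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * The macro therefore relies on fw_hdr and bp being in scope at the
 * call site, as they are in bnx2x_init_firmware() below.
 */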

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
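	/* The resulting name looks like "bnx2x-e1-5.2.7.0.fw" or
	 * "bnx2x-e1h-5.2.7.0.fw"; the digits shown here are illustrative,
	 * the real ones come from the BCM_5710_FW_* macros the driver was
	 * built against. */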

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
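
/* Note the unwind order in the labels above: each label frees only what
 * was successfully allocated before the failing step, in reverse
 * allocation order, and release_firmware() runs on every error path.
 */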

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev and its private area are zeroed in alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
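
/* bnx2x_suspend() and bnx2x_resume() mirror each other: both run under
 * rtnl_lock(); suspend saves PCI config space and unloads the NIC
 * before entering the chosen low-power state, while resume restores
 * config space, returns to D0 and reloads the NIC with LOAD_OPEN.
 */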

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
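
/* A stripped-down unload for EEH: with the PCI channel in an error
 * state the usual chip and MCP handshakes of bnx2x_nic_unload() cannot
 * be trusted, so only host-side resources (timer, IRQs, skbs, SGEs,
 * NAPI contexts, memory) are released and the E1 CAM entries are
 * invalidated in the driver's slow-path copy of the table.
 */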

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
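
/* The [0xA0000, 0xC0000) window test above is a sanity check on the
 * shmem base read back from the chip: a value outside the range the
 * shared memory block normally occupies means the management firmware
 * never set it up, so the driver continues with NO_MCP_FLAG.
 */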

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
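
/* For a recoverable error the EEH core invokes these callbacks in
 * order: bnx2x_io_error_detected() (detach and request a slot reset),
 * bnx2x_io_slot_reset() (re-enable the freshly reset device), then
 * bnx2x_io_resume() (re-read the MCP state and reload the NIC).
 */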

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
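
/* The slow-path workqueue is created before the PCI driver registers,
 * so it already exists by the time any device can be probed;
 * bnx2x_cleanup() below destroys it only after pci_unregister_driver()
 * has torn down every bound device.
 */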

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);