/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1"
#define DRV_MODULE_RELDATE	"2009/08/12"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

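/* used only at init: read a GRC register through the PCI config window */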
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

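/* GRC "go" registers, one per DMAE command slot */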
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

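/* DMAE a block from host memory (dma_addr) to device GRC space (dst_addr);
 * falls back to indirect register writes while DMAE is not yet ready
 */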
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

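/* DMAE len32 dwords from device GRC space (src_addr) into the slowpath
 * wb_data buffer; falls back to indirect reads while DMAE is not yet ready
 */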
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

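/* write a buffer that may exceed the DMAE length limit by splitting it
 * into DMAE_LEN32_WR_MAX sized chunks
 */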
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

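/* scan the per-storm assert lists and print any asserts recorded by the
 * chip microcode; returns the number of asserts found
 */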
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

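/* print the firmware trace from the MCP scratchpad */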
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

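/* dump the driver state on panic: status block indices, the Rx/Tx rings
 * around the current consumers, then the FW trace and the storm asserts
 */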
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

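/* configure the HC to generate interrupts according to the current mode
 * (MSI-X, MSI or INTx) and, on E1H, set the leading/trailing edge masks
 */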
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

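/* write an ACK to the IGU for the given status block index, optionally
 * updating the index and enabling/disabling the interrupt
 */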
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

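/* refresh the driver copies of the c/u status block indices;
 * returns a bitmask of which indices have changed
 */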
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

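/* ack the interrupt and read the SIMD mask from the HC: a set bit means
 * the corresponding status block has a pending update
 */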
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

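/* returns the number of free Tx BDs, with the "next page" BDs counted
 * as used
 */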
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

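/* Tx completion: free all packets completed up to the status block
 * consumer and wake the Tx queue if it was stopped
 */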
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


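/* handle a slowpath (ramrod) completion received on the fastpath ring:
 * advance the per-queue or the global state machine accordingly
 */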
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

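/* allocate, map and publish an SGE page chunk at the given ring index */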
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

1175
1176static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1177 struct eth_fast_path_rx_cqe *fp_cqe)
1178{
1179 struct bnx2x *bp = fp->bp;
4f40f2cb 1180 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1181 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1182 SGE_PAGE_SHIFT;
7a9b2557
VZ
1183 u16 last_max, last_elem, first_elem;
1184 u16 delta = 0;
1185 u16 i;
1186
1187 if (!sge_len)
1188 return;
1189
1190 /* First mark all used pages */
1191 for (i = 0; i < sge_len; i++)
1192 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1193
1194 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1195 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1196
1197 /* Here we assume that the last SGE index is the biggest */
1198 prefetch((void *)(fp->sge_mask));
1199 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1200
1201 last_max = RX_SGE(fp->last_max_sge);
1202 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1203 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1204
1205 /* If ring is not full */
1206 if (last_elem + 1 != first_elem)
1207 last_elem++;
1208
1209 /* Now update the prod */
1210 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1211 if (likely(fp->sge_mask[i]))
1212 break;
1213
1214 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1215 delta += RX_SGE_MASK_ELEM_SZ;
1216 }
1217
1218 if (delta > 0) {
1219 fp->rx_sge_prod += delta;
1220 /* clear page-end entries */
1221 bnx2x_clear_sge_mask_next_elems(fp);
1222 }
1223
1224 DP(NETIF_MSG_RX_STATUS,
1225 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1226 fp->last_max_sge, fp->rx_sge_prod);
1227}
1228
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

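/* start a TPA aggregation: park the cons skb in the TPA pool bin and put
 * a fresh skb from the pool on the prod BD
 */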
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

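/* attach the SGE pages of an aggregated packet to the skb as page
 * fragments, allocating a substitute page for each one taken
 */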
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

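/* complete a TPA aggregation: fix the IP checksum, attach the SGL pages
 * and pass the skb up the stack; drop the packet on allocation failure
 */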
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

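/* publish the Rx BD, CQE and SGE producers to ustorm internal memory */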
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

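/* main Rx work loop: process up to budget CQEs - slowpath events, TPA
 * start/stop and regular packets - then update the ring producers
 */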
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

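/* MSI-X fastpath ISR: Rx vectors schedule NAPI, Tx vectors do the Tx
 * completion work directly and re-enable the interrupt
 */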
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}

1748static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1749{
555f6c78 1750 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1751 u16 status = bnx2x_ack_int(bp);
34f80b04 1752 u16 mask;
ca00392c 1753 int i;
a2fbb9ea 1754
34f80b04 1755 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1756 if (unlikely(status == 0)) {
1757 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1758 return IRQ_NONE;
1759 }
f5372251 1760 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1761
34f80b04 1762 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1763 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1764 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1765 return IRQ_HANDLED;
1766 }
1767
3196a88a
EG
1768#ifdef BNX2X_STOP_ON_ERROR
1769 if (unlikely(bp->panic))
1770 return IRQ_HANDLED;
1771#endif
1772
ca00392c
EG
1773 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1774 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1775
ca00392c
EG
1776 mask = 0x2 << fp->sb_id;
1777 if (status & mask) {
1778 /* Handle Rx or Tx according to SB id */
1779 if (fp->is_rx_queue) {
1780 prefetch(fp->rx_cons_sb);
1781 prefetch(&fp->status_blk->u_status_block.
1782 status_block_index);
a2fbb9ea 1783
ca00392c 1784 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1785
ca00392c
EG
1786 } else {
1787 prefetch(fp->tx_cons_sb);
1788 prefetch(&fp->status_blk->c_status_block.
1789 status_block_index);
1790
1791 bnx2x_update_fpsb_idx(fp);
1792 rmb();
1793 bnx2x_tx_int(fp);
1794
1795 /* Re-enable interrupts */
1796 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1797 le16_to_cpu(fp->fp_u_idx),
1798 IGU_INT_NOP, 1);
1799 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1800 le16_to_cpu(fp->fp_c_idx),
1801 IGU_INT_ENABLE, 1);
1802 }
1803 status &= ~mask;
1804 }
a2fbb9ea
ET
1805 }
1806
a2fbb9ea 1807
34f80b04 1808 if (unlikely(status & 0x1)) {
1cf167f2 1809 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1810
1811 status &= ~0x1;
1812 if (!status)
1813 return IRQ_HANDLED;
1814 }
1815
34f80b04
EG
1816 if (status)
1817 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1818 status);
a2fbb9ea 1819
c18487ee 1820 return IRQ_HANDLED;
a2fbb9ea
ET
1821}
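
/*
 * Sketch of the shared-interrupt status layout as used above (inferred
 * from this code, not from a datasheet): bit 0 signals the default/slow
 * path status block, while each fastpath status block sb_id owns bit
 * (sb_id + 1), hence "mask = 0x2 << fp->sb_id".  For example, with
 * status 0x000b the decode is:
 *
 *	status & 0x1		(* slowpath event -> sp_task *)
 *	status & (0x2 << 0)	(* fastpath SB 0 *)
 *	status & (0x2 << 2)	(* fastpath SB 2 *)
 */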

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
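
/*
 * Pairing sketch (assumed caller pattern, mirroring what bnx2x_set_gpio()
 * below actually does): every successful bnx2x_acquire_hw_lock() must be
 * matched by a bnx2x_release_hw_lock() on the same resource once the
 * shared register block has been programmed:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */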

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
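
/*
 * Worked example of the port-swap arithmetic above (assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4, which is defined outside this
 * file): a request for gpio_num 1 on port 0, with both the swap and
 * strap-override registers set, yields gpio_port = 1 and therefore
 * gpio_shift = 1 + 4 = 5, i.e. the bit that physically belongs to the
 * other port's GPIO 1.
 */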

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
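
/*
 * For reference (standard IEEE 802.3 Annex 28B semantics, not spelled
 * out in this file): PAUSE_BOTH advertises symmetric pause (Pause +
 * Asym_Pause), PAUSE_ASYMMETRIC advertises Asym_Pause alone, and
 * anything else advertises no pause at all -- which is what the
 * default arm of the switch above enforces.
 */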

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec. The credits must not
	   exceed t_fair*FAIR_MEM (the algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
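
/*
 * Worked example of the math above at 10G (T_FAIR_COEF and QM_ARB_BYTES
 * are defined elsewhere, so the concrete timings follow from the
 * "for 10G it is 1000usec" comment rather than from this file):
 *
 *	line_speed = 10000 Mbps  ->  r_param = 10000/8 = 1250 bytes/usec
 *	t_fair     = T_FAIR_COEF/10000           (= 1000 usec per above)
 *	rs_threshold = (100 * 1250 * 5)/4 = 156250 bytes
 *
 * i.e. the rate-shaper timer is armed only while at least the traffic
 * of one 100 usec period (~125KB, plus the 1.25 slack factor) is
 * outstanding.
 */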

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
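
/*
 * Example of the weight calculation above (values hypothetical): with
 * four VNs whose MIN_BW fields decode to 10, 0, 30 and 0, the two zero
 * entries are counted as DEF_MIN_RATE each, so
 *
 *	vn_weight_sum = 1000 + DEF_MIN_RATE + 3000 + DEF_MIN_RATE
 *
 * (each field is scaled by 100).  Only when every field is zero does
 * vn_weight_sum collapse to 0 and fairness shut off.
 */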

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
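
/*
 * Quota example for the rate shaper above (using the 100 usec period
 * documented in bnx2x_init_port_minmax()): a VN capped at
 * vn_max_rate = 10000 Mbps may transmit
 *
 *	quota = (10000 Mbit/s * 100 us) / 8 = 125000 bytes
 *
 * per shaping period, since 1 Mbps is exactly one bit per microsecond.
 */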


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
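
/*
 * Usage sketch -- the DRV_MSG_CODE_DCC_* calls in bnx2x_dcc_event()
 * below are real in-file examples of this pattern.  The caller sends a
 * mailbox command and checks the masked reply code, where 0 means the
 * firmware never echoed our sequence number back:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (rc == 0)
 *		;	(* MCP did not respond -- already logged above *)
 */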

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
static void bnx2x_set_rx_mode(struct net_device *dev);

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_mac_addr_e1h(bp, 0);

	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_mac_addr_e1h(bp, 1);

	/* Only the Tx queues need to be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->state = BNX2X_STATE_OPEN;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
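
/*
 * Posting sketch -- bnx2x_storm_stats_post() further down is a real
 * in-file caller of this helper: a ramrod is queued by passing the
 * command id, the connection id and the two data words, with 'common'
 * selecting the common-ramrod bit in the header:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   data_hi, data_lo, 0);
 */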

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact Dell Support for assistance\n",
	       bp->dev->name);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
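
/*
 * Worked example of the borrow handling in DIFF_64 above: subtracting
 * 0x1_00000000 - 0x0_00000001 (m_hi=1, m_lo=0, s_hi=0, s_lo=1) takes
 * the underflow branch since m_lo < s_lo, 'loans' one from d_hi and
 * computes
 *
 *	d_hi = 1 - 0 - 1 = 0
 *	d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff
 *
 * i.e. the expected 64-bit result 0x0_ffffffff.
 */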
3277
bb2a0f7a 3278#define UPDATE_STAT64(s, t) \
a2fbb9ea 3279 do { \
bb2a0f7a
YG
3280 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3281 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3282 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3283 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3284 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3285 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3286 } while (0)
3287
bb2a0f7a 3288#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3289 do { \
bb2a0f7a
YG
3290 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3291 diff.lo, new->s##_lo, old->s##_lo); \
3292 ADD_64(estats->t##_hi, diff.hi, \
3293 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3294 } while (0)
3295
3296/* sum[hi:lo] += add */
3297#define ADD_EXTEND_64(s_hi, s_lo, a) \
3298 do { \
3299 s_lo += a; \
3300 s_hi += (s_lo < a) ? 1 : 0; \
3301 } while (0)
3302
bb2a0f7a 3303#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3304 do { \
bb2a0f7a
YG
3305 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3306 pstats->mac_stx[1].s##_lo, \
3307 new->s); \
a2fbb9ea
ET
3308 } while (0)
3309
bb2a0f7a 3310#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3311 do { \
4781bfad
EG
3312 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3313 old_tclient->s = tclient->s; \
de832a55
EG
3314 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3315 } while (0)
3316
3317#define UPDATE_EXTEND_USTAT(s, t) \
3318 do { \
3319 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3320 old_uclient->s = uclient->s; \
3321 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3322 } while (0)
3323
3324#define UPDATE_EXTEND_XSTAT(s, t) \
3325 do { \
4781bfad
EG
3326 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3327 old_xclient->s = xclient->s; \
de832a55
EG
3328 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3329 } while (0)
3330
3331/* minuend -= subtrahend */
3332#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3333 do { \
3334 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3335 } while (0)
3336
3337/* minuend[hi:lo] -= subtrahend */
3338#define SUB_EXTEND_64(m_hi, m_lo, s) \
3339 do { \
3340 SUB_64(m_hi, 0, m_lo, s); \
3341 } while (0)
3342
3343#define SUB_EXTEND_USTAT(s, t) \
3344 do { \
3345 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3346 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3347 } while (0)
3348
3349/*
3350 * General service functions
3351 */
3352
3353static inline long bnx2x_hilo(u32 *hiref)
3354{
3355 u32 lo = *(hiref + 1);
3356#if (BITS_PER_LONG == 64)
3357 u32 hi = *hiref;
3358
3359 return HILO_U64(hi, lo);
3360#else
3361 return lo;
3362#endif
3363}
3364
3365/*
3366 * Init service functions
3367 */
3368
bb2a0f7a
YG
3369static void bnx2x_storm_stats_post(struct bnx2x *bp)
3370{
3371 if (!bp->stats_pending) {
3372 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3373 int i, rc;
bb2a0f7a
YG
3374
3375 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3376 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3377 for_each_queue(bp, i)
3378 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3379
3380 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3381 ((u32 *)&ramrod_data)[1],
3382 ((u32 *)&ramrod_data)[0], 0);
3383 if (rc == 0) {
3384 /* stats ramrod has it's own slot on the spq */
3385 bp->spq_left++;
3386 bp->stats_pending = 1;
3387 }
3388 }
3389}
3390
bb2a0f7a
YG
3391static void bnx2x_hw_stats_post(struct bnx2x *bp)
3392{
3393 struct dmae_command *dmae = &bp->stats_dmae;
3394 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3395
3396 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3397 if (CHIP_REV_IS_SLOW(bp))
3398 return;
bb2a0f7a
YG
3399
3400 /* loader */
3401 if (bp->executer_idx) {
3402 int loader_idx = PMF_DMAE_C(bp);
3403
3404 memset(dmae, 0, sizeof(struct dmae_command));
3405
3406 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3407 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3408 DMAE_CMD_DST_RESET |
3409#ifdef __BIG_ENDIAN
3410 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3411#else
3412 DMAE_CMD_ENDIANITY_DW_SWAP |
3413#endif
3414 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3415 DMAE_CMD_PORT_0) |
3416 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3417 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3418 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3419 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3420 sizeof(struct dmae_command) *
3421 (loader_idx + 1)) >> 2;
3422 dmae->dst_addr_hi = 0;
3423 dmae->len = sizeof(struct dmae_command) >> 2;
3424 if (CHIP_IS_E1(bp))
3425 dmae->len--;
3426 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3427 dmae->comp_addr_hi = 0;
3428 dmae->comp_val = 1;
3429
3430 *stats_comp = 0;
3431 bnx2x_post_dmae(bp, dmae, loader_idx);
3432
3433 } else if (bp->func_stx) {
3434 *stats_comp = 0;
3435 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3436 }
3437}
3438
3439static int bnx2x_stats_comp(struct bnx2x *bp)
3440{
3441 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3442 int cnt = 10;
3443
3444 might_sleep();
3445 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3446 if (!cnt) {
3447 BNX2X_ERR("timeout waiting for stats finished\n");
3448 break;
3449 }
3450 cnt--;
12469401 3451 msleep(1);
bb2a0f7a
YG
3452 }
3453 return 1;
3454}
3455
3456/*
3457 * Statistics service functions
3458 */
3459
3460static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3461{
3462 struct dmae_command *dmae;
3463 u32 opcode;
3464 int loader_idx = PMF_DMAE_C(bp);
3465 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3466
3467 /* sanity */
3468 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3469 BNX2X_ERR("BUG!\n");
3470 return;
3471 }
3472
3473 bp->executer_idx = 0;
3474
3475 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3476 DMAE_CMD_C_ENABLE |
3477 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3478#ifdef __BIG_ENDIAN
3479 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3480#else
3481 DMAE_CMD_ENDIANITY_DW_SWAP |
3482#endif
3483 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3484 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3485
3486 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3487 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3488 dmae->src_addr_lo = bp->port.port_stx >> 2;
3489 dmae->src_addr_hi = 0;
3490 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3491 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3492 dmae->len = DMAE_LEN32_RD_MAX;
3493 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3494 dmae->comp_addr_hi = 0;
3495 dmae->comp_val = 1;
3496
3497 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3498 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3499 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3500 dmae->src_addr_hi = 0;
3501 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3502 DMAE_LEN32_RD_MAX * 4);
3503 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3504 DMAE_LEN32_RD_MAX * 4);
3505 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3506 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3507 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3508 dmae->comp_val = DMAE_COMP_VAL;
3509
3510 *stats_comp = 0;
3511 bnx2x_hw_stats_post(bp);
3512 bnx2x_stats_comp(bp);
3513}
3514
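/* Build the DMAE chain executed on every statistics cycle while PMF:
 * copy the host port/function blocks out to MCP shared memory, then read
 * the active MAC (BMAC or EMAC) and NIG counters into host memory; only
 * the final command completes to the host via stats_comp.
 */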
3515static void bnx2x_port_stats_init(struct bnx2x *bp)
3516{
3517 struct dmae_command *dmae;
34f80b04 3518 int port = BP_PORT(bp);
bb2a0f7a 3519 int vn = BP_E1HVN(bp);
a2fbb9ea 3520 u32 opcode;
bb2a0f7a 3521 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3522 u32 mac_addr;
3523 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3524
3525 /* sanity */
3526 if (!bp->link_vars.link_up || !bp->port.pmf) {
3527 BNX2X_ERR("BUG!\n");
3528 return;
3529 }
3530
3531 bp->executer_idx = 0;
3532
3533 /* MCP */
3534 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3535 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3536 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3537#ifdef __BIG_ENDIAN
bb2a0f7a 3538 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3539#else
bb2a0f7a 3540 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3541#endif
3542 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3543 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3544
bb2a0f7a 3545 if (bp->port.port_stx) {
3546
3547 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3548 dmae->opcode = opcode;
3549 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3550 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3551 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3552 dmae->dst_addr_hi = 0;
3553 dmae->len = sizeof(struct host_port_stats) >> 2;
3554 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3555 dmae->comp_addr_hi = 0;
3556 dmae->comp_val = 1;
3557 }
3558
3559 if (bp->func_stx) {
3560
3561 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3562 dmae->opcode = opcode;
3563 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3564 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3565 dmae->dst_addr_lo = bp->func_stx >> 2;
3566 dmae->dst_addr_hi = 0;
3567 dmae->len = sizeof(struct host_func_stats) >> 2;
3568 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3569 dmae->comp_addr_hi = 0;
3570 dmae->comp_val = 1;
3571 }
3572
bb2a0f7a 3573 /* MAC */
3574 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3575 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3576 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3577#ifdef __BIG_ENDIAN
3578 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3579#else
3580 DMAE_CMD_ENDIANITY_DW_SWAP |
3581#endif
3582 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3583 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3584
c18487ee 3585 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3586
3587 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3588 NIG_REG_INGRESS_BMAC0_MEM);
3589
3590 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3591 BIGMAC_REGISTER_TX_STAT_GTBYT */
3592 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3593 dmae->opcode = opcode;
3594 dmae->src_addr_lo = (mac_addr +
3595 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3596 dmae->src_addr_hi = 0;
3597 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3598 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3599 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3600 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3601 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3602 dmae->comp_addr_hi = 0;
3603 dmae->comp_val = 1;
3604
3605 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3606 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3607 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3608 dmae->opcode = opcode;
3609 dmae->src_addr_lo = (mac_addr +
3610 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3611 dmae->src_addr_hi = 0;
3612 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3613 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3614 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3615 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3616 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3617 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3618 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3619 dmae->comp_addr_hi = 0;
3620 dmae->comp_val = 1;
3621
c18487ee 3622 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3623
3624 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3625
3626 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3627 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3628 dmae->opcode = opcode;
3629 dmae->src_addr_lo = (mac_addr +
3630 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3631 dmae->src_addr_hi = 0;
3632 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3633 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3634 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3635 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3636 dmae->comp_addr_hi = 0;
3637 dmae->comp_val = 1;
3638
3639 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3640 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3641 dmae->opcode = opcode;
3642 dmae->src_addr_lo = (mac_addr +
3643 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3644 dmae->src_addr_hi = 0;
3645 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3646 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3647 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3648 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3649 dmae->len = 1;
3650 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3651 dmae->comp_addr_hi = 0;
3652 dmae->comp_val = 1;
3653
3654 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3655 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3656 dmae->opcode = opcode;
3657 dmae->src_addr_lo = (mac_addr +
3658 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3659 dmae->src_addr_hi = 0;
3660 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3661 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3662 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3663 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3664 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3665 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3666 dmae->comp_addr_hi = 0;
3667 dmae->comp_val = 1;
3668 }
3669
3670 /* NIG */
3671 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3672 dmae->opcode = opcode;
3673 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3674 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3675 dmae->src_addr_hi = 0;
3676 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3677 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3678 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3681 dmae->comp_val = 1;
3682
3683 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3684 dmae->opcode = opcode;
3685 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3686 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3687 dmae->src_addr_hi = 0;
3688 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3689 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3690 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3691 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3692 dmae->len = (2*sizeof(u32)) >> 2;
3693 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3694 dmae->comp_addr_hi = 0;
3695 dmae->comp_val = 1;
3696
3697 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3699 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3700 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3701#ifdef __BIG_ENDIAN
3702 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3703#else
3704 DMAE_CMD_ENDIANITY_DW_SWAP |
3705#endif
3706 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3707 (vn << DMAE_CMD_E1HVN_SHIFT));
3708 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3709 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3710 dmae->src_addr_hi = 0;
3711 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3712 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3713 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3714 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3715 dmae->len = (2*sizeof(u32)) >> 2;
3716 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3717 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3718 dmae->comp_val = DMAE_COMP_VAL;
3719
3720 *stats_comp = 0;
3721}
3722
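/* Non-PMF variant: a single DMAE command that copies the host function
 * statistics block out to the MCP-provided address in func_stx.
 */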
bb2a0f7a 3723static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3724{
3725 struct dmae_command *dmae = &bp->stats_dmae;
3726 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3727
3728 /* sanity */
3729 if (!bp->func_stx) {
3730 BNX2X_ERR("BUG!\n");
3731 return;
3732 }
a2fbb9ea 3733
3734 bp->executer_idx = 0;
3735 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3736
3737 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3738 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3739 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3740#ifdef __BIG_ENDIAN
3741 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3742#else
3743 DMAE_CMD_ENDIANITY_DW_SWAP |
3744#endif
3745 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3746 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3747 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3748 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3749 dmae->dst_addr_lo = bp->func_stx >> 2;
3750 dmae->dst_addr_hi = 0;
3751 dmae->len = sizeof(struct host_func_stats) >> 2;
3752 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3753 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3754 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3755
3756 *stats_comp = 0;
3757}
a2fbb9ea 3758
3759static void bnx2x_stats_start(struct bnx2x *bp)
3760{
3761 if (bp->port.pmf)
3762 bnx2x_port_stats_init(bp);
3763
3764 else if (bp->func_stx)
3765 bnx2x_func_stats_init(bp);
3766
3767 bnx2x_hw_stats_post(bp);
3768 bnx2x_storm_stats_post(bp);
3769}
3770
3771static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3772{
3773 bnx2x_stats_comp(bp);
3774 bnx2x_stats_pmf_update(bp);
3775 bnx2x_stats_start(bp);
3776}
3777
3778static void bnx2x_stats_restart(struct bnx2x *bp)
3779{
3780 bnx2x_stats_comp(bp);
3781 bnx2x_stats_start(bp);
3782}
3783
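/* Convert the raw BigMAC register snapshot into MIB-style counters; the
 * UPDATE_STAT64() macros accumulate the delta since the previous read
 * into 64-bit hi/lo pairs.
 */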
3784static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3785{
3786 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3787 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3788 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3789 struct {
3790 u32 lo;
3791 u32 hi;
3792 } diff;
3793
3794 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3795 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3796 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3797 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3798 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3799 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3800 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3801 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3802 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3803 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3804 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3805 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3806 UPDATE_STAT64(tx_stat_gt127,
3807 tx_stat_etherstatspkts65octetsto127octets);
3808 UPDATE_STAT64(tx_stat_gt255,
3809 tx_stat_etherstatspkts128octetsto255octets);
3810 UPDATE_STAT64(tx_stat_gt511,
3811 tx_stat_etherstatspkts256octetsto511octets);
3812 UPDATE_STAT64(tx_stat_gt1023,
3813 tx_stat_etherstatspkts512octetsto1023octets);
3814 UPDATE_STAT64(tx_stat_gt1518,
3815 tx_stat_etherstatspkts1024octetsto1522octets);
3816 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3817 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3818 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3819 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3820 UPDATE_STAT64(tx_stat_gterr,
3821 tx_stat_dot3statsinternalmactransmiterrors);
3822 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3823
3824 estats->pause_frames_received_hi =
3825 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3826 estats->pause_frames_received_lo =
3827 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3828
3829 estats->pause_frames_sent_hi =
3830 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3831 estats->pause_frames_sent_lo =
3832 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3833}
3834
3835static void bnx2x_emac_stats_update(struct bnx2x *bp)
3836{
3837 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3838 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3839 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3840
3841 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3842 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3843 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3844 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3845 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3846 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3847 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3848 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3849 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3850 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3851 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3852 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3853 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3854 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3855 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3856 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3857 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3858 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3859 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3860 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3861 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3862 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3863 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3864 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3865 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3866 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3867 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3868 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3869 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3870 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3871 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3872
3873 estats->pause_frames_received_hi =
3874 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3875 estats->pause_frames_received_lo =
3876 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3877 ADD_64(estats->pause_frames_received_hi,
3878 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3879 estats->pause_frames_received_lo,
3880 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3881
3882 estats->pause_frames_sent_hi =
3883 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3884 estats->pause_frames_sent_lo =
3885 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3886 ADD_64(estats->pause_frames_sent_hi,
3887 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3888 estats->pause_frames_sent_lo,
3889 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3890}
3891
3892static int bnx2x_hw_stats_update(struct bnx2x *bp)
3893{
3894 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3895 struct nig_stats *old = &(bp->port.old_nig_stats);
3896 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3897 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3898 struct {
3899 u32 lo;
3900 u32 hi;
3901 } diff;
de832a55 3902 u32 nig_timer_max;
3903
3904 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3905 bnx2x_bmac_stats_update(bp);
3906
3907 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3908 bnx2x_emac_stats_update(bp);
3909
3910 else { /* unreached */
c3eefaf6 3911 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3912 return -1;
3913 }
a2fbb9ea 3914
3915 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3916 new->brb_discard - old->brb_discard);
3917 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3918 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3919
3920 UPDATE_STAT64_NIG(egress_mac_pkt0,
3921 etherstatspkts1024octetsto1522octets);
3922 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3923
bb2a0f7a 3924 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3925
3926 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3927 sizeof(struct mac_stx));
3928 estats->brb_drop_hi = pstats->brb_drop_hi;
3929 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3930
bb2a0f7a 3931 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3932
3933 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3934 if (nig_timer_max != estats->nig_timer_max) {
3935 estats->nig_timer_max = nig_timer_max;
3936 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3937 }
3938
bb2a0f7a 3939 return 0;
3940}
3941
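/* Consume the firmware's reply to the last STAT_QUERY ramrod. Every storm
 * stamps its per-client block with a counter; if any block does not match
 * bp->stats_counter the snapshot is stale and a nonzero value is returned
 * so the caller can retry.
 */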
bb2a0f7a 3942static int bnx2x_storm_stats_update(struct bnx2x *bp)
3943{
3944 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3945 struct tstorm_per_port_stats *tport =
de832a55 3946 &stats->tstorm_common.port_statistics;
3947 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3948 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3949 int i;
3950
3951 memcpy(&(fstats->total_bytes_received_hi),
3952 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3953 sizeof(struct host_func_stats) - 2*sizeof(u32));
3954 estats->error_bytes_received_hi = 0;
3955 estats->error_bytes_received_lo = 0;
3956 estats->etherstatsoverrsizepkts_hi = 0;
3957 estats->etherstatsoverrsizepkts_lo = 0;
3958 estats->no_buff_discard_hi = 0;
3959 estats->no_buff_discard_lo = 0;
a2fbb9ea 3960
ca00392c 3961 for_each_rx_queue(bp, i) {
3962 struct bnx2x_fastpath *fp = &bp->fp[i];
3963 int cl_id = fp->cl_id;
3964 struct tstorm_per_client_stats *tclient =
3965 &stats->tstorm_common.client_statistics[cl_id];
3966 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3967 struct ustorm_per_client_stats *uclient =
3968 &stats->ustorm_common.client_statistics[cl_id];
3969 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3970 struct xstorm_per_client_stats *xclient =
3971 &stats->xstorm_common.client_statistics[cl_id];
3972 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3973 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3974 u32 diff;
3975
3976 /* are storm stats valid? */
3977 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3978 bp->stats_counter) {
3979 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3980 " xstorm counter (%d) != stats_counter (%d)\n",
3981 i, xclient->stats_counter, bp->stats_counter);
3982 return -1;
3983 }
3984 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3985 bp->stats_counter) {
3986 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3987 " tstorm counter (%d) != stats_counter (%d)\n",
3988 i, tclient->stats_counter, bp->stats_counter);
3989 return -2;
3990 }
3991 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3992 bp->stats_counter) {
3993 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3994 " ustorm counter (%d) != stats_counter (%d)\n",
3995 i, uclient->stats_counter, bp->stats_counter);
3996 return -4;
3997 }
a2fbb9ea 3998
de832a55 3999 qstats->total_bytes_received_hi =
ca00392c 4000 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4001 qstats->total_bytes_received_lo =
4002 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4003
4004 ADD_64(qstats->total_bytes_received_hi,
4005 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4006 qstats->total_bytes_received_lo,
4007 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4008
4009 ADD_64(qstats->total_bytes_received_hi,
4010 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4011 qstats->total_bytes_received_lo,
4012 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4013
4014 qstats->valid_bytes_received_hi =
4015 qstats->total_bytes_received_hi;
de832a55 4016 qstats->valid_bytes_received_lo =
ca00392c 4017 qstats->total_bytes_received_lo;
bb2a0f7a 4018
de832a55 4019 qstats->error_bytes_received_hi =
bb2a0f7a 4020 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4021 qstats->error_bytes_received_lo =
bb2a0f7a 4022 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4023
4024 ADD_64(qstats->total_bytes_received_hi,
4025 qstats->error_bytes_received_hi,
4026 qstats->total_bytes_received_lo,
4027 qstats->error_bytes_received_lo);
4028
4029 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4030 total_unicast_packets_received);
4031 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4032 total_multicast_packets_received);
4033 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4034 total_broadcast_packets_received);
4035 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4036 etherstatsoverrsizepkts);
4037 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4038
4039 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4040 total_unicast_packets_received);
4041 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4042 total_multicast_packets_received);
4043 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4044 total_broadcast_packets_received);
4045 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4046 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4047 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4048
4049 qstats->total_bytes_transmitted_hi =
ca00392c 4050 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4051 qstats->total_bytes_transmitted_lo =
4052 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4053
4054 ADD_64(qstats->total_bytes_transmitted_hi,
4055 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4056 qstats->total_bytes_transmitted_lo,
4057 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4058
4059 ADD_64(qstats->total_bytes_transmitted_hi,
4060 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4061 qstats->total_bytes_transmitted_lo,
4062 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4063
4064 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4065 total_unicast_packets_transmitted);
4066 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4067 total_multicast_packets_transmitted);
4068 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4069 total_broadcast_packets_transmitted);
4070
4071 old_tclient->checksum_discard = tclient->checksum_discard;
4072 old_tclient->ttl0_discard = tclient->ttl0_discard;
4073
4074 ADD_64(fstats->total_bytes_received_hi,
4075 qstats->total_bytes_received_hi,
4076 fstats->total_bytes_received_lo,
4077 qstats->total_bytes_received_lo);
4078 ADD_64(fstats->total_bytes_transmitted_hi,
4079 qstats->total_bytes_transmitted_hi,
4080 fstats->total_bytes_transmitted_lo,
4081 qstats->total_bytes_transmitted_lo);
4082 ADD_64(fstats->total_unicast_packets_received_hi,
4083 qstats->total_unicast_packets_received_hi,
4084 fstats->total_unicast_packets_received_lo,
4085 qstats->total_unicast_packets_received_lo);
4086 ADD_64(fstats->total_multicast_packets_received_hi,
4087 qstats->total_multicast_packets_received_hi,
4088 fstats->total_multicast_packets_received_lo,
4089 qstats->total_multicast_packets_received_lo);
4090 ADD_64(fstats->total_broadcast_packets_received_hi,
4091 qstats->total_broadcast_packets_received_hi,
4092 fstats->total_broadcast_packets_received_lo,
4093 qstats->total_broadcast_packets_received_lo);
4094 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4095 qstats->total_unicast_packets_transmitted_hi,
4096 fstats->total_unicast_packets_transmitted_lo,
4097 qstats->total_unicast_packets_transmitted_lo);
4098 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4099 qstats->total_multicast_packets_transmitted_hi,
4100 fstats->total_multicast_packets_transmitted_lo,
4101 qstats->total_multicast_packets_transmitted_lo);
4102 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4103 qstats->total_broadcast_packets_transmitted_hi,
4104 fstats->total_broadcast_packets_transmitted_lo,
4105 qstats->total_broadcast_packets_transmitted_lo);
4106 ADD_64(fstats->valid_bytes_received_hi,
4107 qstats->valid_bytes_received_hi,
4108 fstats->valid_bytes_received_lo,
4109 qstats->valid_bytes_received_lo);
4110
4111 ADD_64(estats->error_bytes_received_hi,
4112 qstats->error_bytes_received_hi,
4113 estats->error_bytes_received_lo,
4114 qstats->error_bytes_received_lo);
4115 ADD_64(estats->etherstatsoverrsizepkts_hi,
4116 qstats->etherstatsoverrsizepkts_hi,
4117 estats->etherstatsoverrsizepkts_lo,
4118 qstats->etherstatsoverrsizepkts_lo);
4119 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4120 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4121 }
4122
4123 ADD_64(fstats->total_bytes_received_hi,
4124 estats->rx_stat_ifhcinbadoctets_hi,
4125 fstats->total_bytes_received_lo,
4126 estats->rx_stat_ifhcinbadoctets_lo);
4127
4128 memcpy(estats, &(fstats->total_bytes_received_hi),
4129 sizeof(struct host_func_stats) - 2*sizeof(u32));
4130
4131 ADD_64(estats->etherstatsoverrsizepkts_hi,
4132 estats->rx_stat_dot3statsframestoolong_hi,
4133 estats->etherstatsoverrsizepkts_lo,
4134 estats->rx_stat_dot3statsframestoolong_lo);
4135 ADD_64(estats->error_bytes_received_hi,
4136 estats->rx_stat_ifhcinbadoctets_hi,
4137 estats->error_bytes_received_lo,
4138 estats->rx_stat_ifhcinbadoctets_lo);
4139
4140 if (bp->port.pmf) {
4141 estats->mac_filter_discard =
4142 le32_to_cpu(tport->mac_filter_discard);
4143 estats->xxoverflow_discard =
4144 le32_to_cpu(tport->xxoverflow_discard);
4145 estats->brb_truncate_discard =
bb2a0f7a 4146 le32_to_cpu(tport->brb_truncate_discard);
4147 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4148 }
4149
4150 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4151
4152 bp->stats_pending = 0;
4153
4154 return 0;
4155}
4156
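/* Fold the driver's 64-bit hi/lo counters into the generic
 * struct net_device_stats reported to the network stack.
 */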
bb2a0f7a 4157static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4158{
bb2a0f7a 4159 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4160 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4161 int i;
4162
4163 nstats->rx_packets =
4164 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4165 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4166 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4167
4168 nstats->tx_packets =
4169 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4170 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4171 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4172
de832a55 4173 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4174
0e39e645 4175 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4176
de832a55 4177 nstats->rx_dropped = estats->mac_discard;
ca00392c 4178 for_each_rx_queue(bp, i)
4179 nstats->rx_dropped +=
4180 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4181
4182 nstats->tx_dropped = 0;
4183
4184 nstats->multicast =
de832a55 4185 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4186
bb2a0f7a 4187 nstats->collisions =
de832a55 4188 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4189
4190 nstats->rx_length_errors =
4191 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4192 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4193 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4194 bnx2x_hilo(&estats->brb_truncate_hi);
4195 nstats->rx_crc_errors =
4196 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4197 nstats->rx_frame_errors =
4198 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4199 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4200 nstats->rx_missed_errors = estats->xxoverflow_discard;
4201
4202 nstats->rx_errors = nstats->rx_length_errors +
4203 nstats->rx_over_errors +
4204 nstats->rx_crc_errors +
4205 nstats->rx_frame_errors +
4206 nstats->rx_fifo_errors +
4207 nstats->rx_missed_errors;
a2fbb9ea 4208
bb2a0f7a 4209 nstats->tx_aborted_errors =
4210 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4211 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4212 nstats->tx_carrier_errors =
4213 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4214 nstats->tx_fifo_errors = 0;
4215 nstats->tx_heartbeat_errors = 0;
4216 nstats->tx_window_errors = 0;
4217
4218 nstats->tx_errors = nstats->tx_aborted_errors +
4219 nstats->tx_carrier_errors +
4220 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4221}
4222
4223static void bnx2x_drv_stats_update(struct bnx2x *bp)
4224{
4225 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4226 int i;
4227
4228 estats->driver_xoff = 0;
4229 estats->rx_err_discard_pkt = 0;
4230 estats->rx_skb_alloc_failed = 0;
4231 estats->hw_csum_err = 0;
ca00392c 4232 for_each_rx_queue(bp, i) {
4233 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4234
4235 estats->driver_xoff += qstats->driver_xoff;
4236 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4237 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4238 estats->hw_csum_err += qstats->hw_csum_err;
4239 }
4240}
4241
bb2a0f7a 4242static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4243{
bb2a0f7a 4244 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4245
4246 if (*stats_comp != DMAE_COMP_VAL)
4247 return;
4248
4249 if (bp->port.pmf)
de832a55 4250 bnx2x_hw_stats_update(bp);
a2fbb9ea 4251
4252 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
 4253 BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");
4254 bnx2x_panic();
4255 return;
4256 }
4257
4258 bnx2x_net_stats_update(bp);
4259 bnx2x_drv_stats_update(bp);
4260
a2fbb9ea 4261 if (bp->msglevel & NETIF_MSG_TIMER) {
4262 struct bnx2x_fastpath *fp0_rx = bp->fp;
4263 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4264 struct tstorm_per_client_stats *old_tclient =
4265 &bp->fp->old_tclient;
4266 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4267 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4268 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4269 int i;
4270
4271 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4272 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4273 " tx pkt (%lx)\n",
4274 bnx2x_tx_avail(fp0_tx),
4275 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4276 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4277 " rx pkt (%lx)\n",
4278 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4279 fp0_rx->rx_comp_cons),
4280 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4281 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4282 "brb truncate %u\n",
4283 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4284 qstats->driver_xoff,
4285 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4286 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4287 "packets_too_big_discard %lu no_buff_discard %lu "
4288 "mac_discard %u mac_filter_discard %u "
4289 "xxovrflow_discard %u brb_truncate_discard %u "
4290 "ttl0_discard %u\n",
4781bfad 4291 le32_to_cpu(old_tclient->checksum_discard),
4292 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4293 bnx2x_hilo(&qstats->no_buff_discard_hi),
4294 estats->mac_discard, estats->mac_filter_discard,
4295 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4296 le32_to_cpu(old_tclient->ttl0_discard));
4297
4298 for_each_queue(bp, i) {
4299 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4300 bnx2x_fp(bp, i, tx_pkt),
4301 bnx2x_fp(bp, i, rx_pkt),
4302 bnx2x_fp(bp, i, rx_calls));
4303 }
4304 }
4305
4306 bnx2x_hw_stats_post(bp);
4307 bnx2x_storm_stats_post(bp);
4308}
a2fbb9ea 4309
4310static void bnx2x_port_stats_stop(struct bnx2x *bp)
4311{
4312 struct dmae_command *dmae;
4313 u32 opcode;
4314 int loader_idx = PMF_DMAE_C(bp);
4315 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4316
bb2a0f7a 4317 bp->executer_idx = 0;
a2fbb9ea 4318
4319 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4320 DMAE_CMD_C_ENABLE |
4321 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4322#ifdef __BIG_ENDIAN
bb2a0f7a 4323 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4324#else
bb2a0f7a 4325 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4326#endif
4327 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4328 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4329
4330 if (bp->port.port_stx) {
4331
4332 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4333 if (bp->func_stx)
4334 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4335 else
4336 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4337 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4338 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4339 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4340 dmae->dst_addr_hi = 0;
4341 dmae->len = sizeof(struct host_port_stats) >> 2;
4342 if (bp->func_stx) {
4343 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4344 dmae->comp_addr_hi = 0;
4345 dmae->comp_val = 1;
4346 } else {
4347 dmae->comp_addr_lo =
4348 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4349 dmae->comp_addr_hi =
4350 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4351 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4352
4353 *stats_comp = 0;
4354 }
4355 }
4356
4357 if (bp->func_stx) {
4358
4359 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4360 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4361 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4362 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4363 dmae->dst_addr_lo = bp->func_stx >> 2;
4364 dmae->dst_addr_hi = 0;
4365 dmae->len = sizeof(struct host_func_stats) >> 2;
4366 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4367 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4368 dmae->comp_val = DMAE_COMP_VAL;
4369
4370 *stats_comp = 0;
a2fbb9ea 4371 }
4372}
4373
4374static void bnx2x_stats_stop(struct bnx2x *bp)
4375{
4376 int update = 0;
4377
4378 bnx2x_stats_comp(bp);
4379
4380 if (bp->port.pmf)
4381 update = (bnx2x_hw_stats_update(bp) == 0);
4382
4383 update |= (bnx2x_storm_stats_update(bp) == 0);
4384
4385 if (update) {
4386 bnx2x_net_stats_update(bp);
a2fbb9ea 4387
4388 if (bp->port.pmf)
4389 bnx2x_port_stats_stop(bp);
4390
4391 bnx2x_hw_stats_post(bp);
4392 bnx2x_stats_comp(bp);
4393 }
4394}
4395
4396static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4397{
4398}
4399
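/* Statistics state machine, indexed by [current state][event]: each entry
 * gives the handler to run and the next state. For example, a STOP event
 * in STATS_STATE_ENABLED runs bnx2x_stats_stop() and moves the driver to
 * STATS_STATE_DISABLED.
 */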
4400static const struct {
4401 void (*action)(struct bnx2x *bp);
4402 enum bnx2x_stats_state next_state;
4403} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4404/* state event */
4405{
4406/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4407/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4408/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4409/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4410},
4411{
4412/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4413/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4414/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4415/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4416}
4417};
4418
4419static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4420{
4421 enum bnx2x_stats_state state = bp->stats_state;
4422
4423 bnx2x_stats_stm[state][event].action(bp);
4424 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4425
4426 /* Make sure the state has been "changed" */
4427 smp_wmb();
4428
4429 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4430 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4431 state, event, bp->stats_state);
4432}
4433
4434static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4435{
4436 struct dmae_command *dmae;
4437 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4438
4439 /* sanity */
4440 if (!bp->port.pmf || !bp->port.port_stx) {
4441 BNX2X_ERR("BUG!\n");
4442 return;
4443 }
4444
4445 bp->executer_idx = 0;
4446
4447 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4448 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4449 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4450 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4451#ifdef __BIG_ENDIAN
4452 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4453#else
4454 DMAE_CMD_ENDIANITY_DW_SWAP |
4455#endif
4456 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4457 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4458 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4459 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4460 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4461 dmae->dst_addr_hi = 0;
4462 dmae->len = sizeof(struct host_port_stats) >> 2;
4463 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4464 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4465 dmae->comp_val = DMAE_COMP_VAL;
4466
4467 *stats_comp = 0;
4468 bnx2x_hw_stats_post(bp);
4469 bnx2x_stats_comp(bp);
4470}
4471
4472static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4473{
4474 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4475 int port = BP_PORT(bp);
4476 int func;
4477 u32 func_stx;
4478
4479 /* sanity */
4480 if (!bp->port.pmf || !bp->func_stx) {
4481 BNX2X_ERR("BUG!\n");
4482 return;
4483 }
4484
4485 /* save our func_stx */
4486 func_stx = bp->func_stx;
4487
4488 for (vn = VN_0; vn < vn_max; vn++) {
4489 func = 2*vn + port;
4490
4491 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4492 bnx2x_func_stats_init(bp);
4493 bnx2x_hw_stats_post(bp);
4494 bnx2x_stats_comp(bp);
4495 }
4496
4497 /* restore our func_stx */
4498 bp->func_stx = func_stx;
4499}
4500
4501static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4502{
4503 struct dmae_command *dmae = &bp->stats_dmae;
4504 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4505
4506 /* sanity */
4507 if (!bp->func_stx) {
4508 BNX2X_ERR("BUG!\n");
4509 return;
4510 }
4511
4512 bp->executer_idx = 0;
4513 memset(dmae, 0, sizeof(struct dmae_command));
4514
4515 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4516 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4517 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4518#ifdef __BIG_ENDIAN
4519 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4520#else
4521 DMAE_CMD_ENDIANITY_DW_SWAP |
4522#endif
4523 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4524 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4525 dmae->src_addr_lo = bp->func_stx >> 2;
4526 dmae->src_addr_hi = 0;
4527 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4528 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4529 dmae->len = sizeof(struct host_func_stats) >> 2;
4530 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4531 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4532 dmae->comp_val = DMAE_COMP_VAL;
4533
4534 *stats_comp = 0;
4535 bnx2x_hw_stats_post(bp);
4536 bnx2x_stats_comp(bp);
4537}
4538
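/* One-time statistics setup at load time: latch the MCP-provided port and
 * function stats addresses, seed the "old" NIG counters with the current
 * hardware values so the first delta is sane, and zero all software
 * counters before the state machine starts in STATS_STATE_DISABLED.
 */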
4539static void bnx2x_stats_init(struct bnx2x *bp)
4540{
4541 int port = BP_PORT(bp);
4542 int func = BP_FUNC(bp);
4543 int i;
4544
4545 bp->stats_pending = 0;
4546 bp->executer_idx = 0;
4547 bp->stats_counter = 0;
4548
4549 /* port and func stats for management */
4550 if (!BP_NOMCP(bp)) {
4551 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4552 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4553
4554 } else {
4555 bp->port.port_stx = 0;
4556 bp->func_stx = 0;
4557 }
4558 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4559 bp->port.port_stx, bp->func_stx);
4560
4561 /* port stats */
4562 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4563 bp->port.old_nig_stats.brb_discard =
4564 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4565 bp->port.old_nig_stats.brb_truncate =
4566 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4567 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4568 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4569 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4570 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4571
4572 /* function stats */
4573 for_each_queue(bp, i) {
4574 struct bnx2x_fastpath *fp = &bp->fp[i];
4575
4576 memset(&fp->old_tclient, 0,
4577 sizeof(struct tstorm_per_client_stats));
4578 memset(&fp->old_uclient, 0,
4579 sizeof(struct ustorm_per_client_stats));
4580 memset(&fp->old_xclient, 0,
4581 sizeof(struct xstorm_per_client_stats));
4582 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4583 }
4584
4585 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4586 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4587
4588 bp->stats_state = STATS_STATE_DISABLED;
4589
4590 if (bp->port.pmf) {
4591 if (bp->port.port_stx)
4592 bnx2x_port_stats_base_init(bp);
4593
4594 if (bp->func_stx)
4595 bnx2x_func_stats_base_init(bp);
4596
4597 } else if (bp->func_stx)
4598 bnx2x_func_stats_base_update(bp);
4599}
4600
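/* Periodic driver timer: services the rings in poll mode, exchanges the
 * driver/MCP heartbeat pulse, and kicks a statistics update while the
 * device is up; it then re-arms itself with current_interval.
 */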
4601static void bnx2x_timer(unsigned long data)
4602{
4603 struct bnx2x *bp = (struct bnx2x *) data;
4604
4605 if (!netif_running(bp->dev))
4606 return;
4607
4608 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4609 goto timer_restart;
4610
4611 if (poll) {
4612 struct bnx2x_fastpath *fp = &bp->fp[0];
4613 int rc;
4614
7961f791 4615 bnx2x_tx_int(fp);
4616 rc = bnx2x_rx_int(fp, 1000);
4617 }
4618
4619 if (!BP_NOMCP(bp)) {
4620 int func = BP_FUNC(bp);
4621 u32 drv_pulse;
4622 u32 mcp_pulse;
4623
4624 ++bp->fw_drv_pulse_wr_seq;
4625 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4626 /* TBD - add SYSTEM_TIME */
4627 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4628 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4629
34f80b04 4630 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4631 MCP_PULSE_SEQ_MASK);
4632 /* The delta between driver pulse and mcp response
4633 * should be 1 (before mcp response) or 0 (after mcp response)
4634 */
4635 if ((drv_pulse != mcp_pulse) &&
4636 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4637 /* someone lost a heartbeat... */
4638 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4639 drv_pulse, mcp_pulse);
4640 }
4641 }
4642
4643 if ((bp->state == BNX2X_STATE_OPEN) ||
4644 (bp->state == BNX2X_STATE_DISABLED))
4645 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4646
f1410647 4647timer_restart:
4648 mod_timer(&bp->timer, jiffies + bp->current_interval);
4649}
4650
4651/* end of Statistics */
4652
4653/* nic init */
4654
4655/*
4656 * nic init service functions
4657 */
4658
34f80b04 4659static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4660{
4661 int port = BP_PORT(bp);
4662
4663 /* "CSTORM" */
4664 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4665 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4666 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4667 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4668 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4669 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4670}
4671
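/* Attach a host status block to the chip: program its DMA address into
 * the CSTORM for both the USTORM- and CSTORM-serviced halves, and leave
 * host coalescing disabled on every index until it is configured by
 * bnx2x_update_coalesce().
 */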
4672static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4673 dma_addr_t mapping, int sb_id)
4674{
4675 int port = BP_PORT(bp);
bb2a0f7a 4676 int func = BP_FUNC(bp);
a2fbb9ea 4677 int index;
34f80b04 4678 u64 section;
4679
4680 /* USTORM */
4681 section = ((u64)mapping) + offsetof(struct host_status_block,
4682 u_status_block);
34f80b04 4683 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4684
4685 REG_WR(bp, BAR_CSTRORM_INTMEM +
4686 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4687 REG_WR(bp, BAR_CSTRORM_INTMEM +
4688 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4689 U64_HI(section));
4690 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4691 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4692
4693 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4694 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4695 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4696
4697 /* CSTORM */
4698 section = ((u64)mapping) + offsetof(struct host_status_block,
4699 c_status_block);
34f80b04 4700 sb->c_status_block.status_block_id = sb_id;
4701
4702 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4703 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4704 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4705 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4706 U64_HI(section));
7a9b2557 4707 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4708 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4709
4710 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4711 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4712 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4713
4714 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4715}
4716
4717static void bnx2x_zero_def_sb(struct bnx2x *bp)
4718{
4719 int func = BP_FUNC(bp);
a2fbb9ea 4720
ca00392c 4721 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4722 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4723 sizeof(struct tstorm_def_status_block)/4);
4724 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4725 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4726 sizeof(struct cstorm_def_status_block_u)/4);
4727 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4728 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4729 sizeof(struct cstorm_def_status_block_c)/4);
4730 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4731 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4732 sizeof(struct xstorm_def_status_block)/4);
4733}
4734
4735static void bnx2x_init_def_sb(struct bnx2x *bp,
4736 struct host_def_status_block *def_sb,
34f80b04 4737 dma_addr_t mapping, int sb_id)
a2fbb9ea 4738{
4739 int port = BP_PORT(bp);
4740 int func = BP_FUNC(bp);
4741 int index, val, reg_offset;
4742 u64 section;
4743
4744 /* ATTN */
4745 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4746 atten_status_block);
34f80b04 4747 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4748
4749 bp->attn_state = 0;
4750
4751 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4752 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4753
34f80b04 4754 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4755 bp->attn_group[index].sig[0] = REG_RD(bp,
4756 reg_offset + 0x10*index);
4757 bp->attn_group[index].sig[1] = REG_RD(bp,
4758 reg_offset + 0x4 + 0x10*index);
4759 bp->attn_group[index].sig[2] = REG_RD(bp,
4760 reg_offset + 0x8 + 0x10*index);
4761 bp->attn_group[index].sig[3] = REG_RD(bp,
4762 reg_offset + 0xc + 0x10*index);
4763 }
4764
4765 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4766 HC_REG_ATTN_MSG0_ADDR_L);
4767
4768 REG_WR(bp, reg_offset, U64_LO(section));
4769 REG_WR(bp, reg_offset + 4, U64_HI(section));
4770
4771 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4772
4773 val = REG_RD(bp, reg_offset);
34f80b04 4774 val |= sb_id;
4775 REG_WR(bp, reg_offset, val);
4776
4777 /* USTORM */
4778 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4779 u_def_status_block);
34f80b04 4780 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4781
4782 REG_WR(bp, BAR_CSTRORM_INTMEM +
4783 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4784 REG_WR(bp, BAR_CSTRORM_INTMEM +
4785 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4786 U64_HI(section));
4787 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4788 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4789
4790 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4791 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4792 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4793
4794 /* CSTORM */
4795 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4796 c_def_status_block);
34f80b04 4797 def_sb->c_def_status_block.status_block_id = sb_id;
4798
4799 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4800 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4801 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4802 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4803 U64_HI(section));
5c862848 4804 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4806
4807 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4808 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4809 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4810
4811 /* TSTORM */
4812 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4813 t_def_status_block);
34f80b04 4814 def_sb->t_def_status_block.status_block_id = sb_id;
4815
4816 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4817 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4818 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4819 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4820 U64_HI(section));
5c862848 4821 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4822 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4823
4824 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4825 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4826 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4827
4828 /* XSTORM */
4829 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4830 x_def_status_block);
34f80b04 4831 def_sb->x_def_status_block.status_block_id = sb_id;
4832
4833 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4834 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4835 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4836 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4837 U64_HI(section));
5c862848 4838 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4839 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4840
4841 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4842 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4843 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4844
bb2a0f7a 4845 bp->stats_pending = 0;
66e855f3 4846 bp->set_mac_pending = 0;
bb2a0f7a 4847
34f80b04 4848 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4849}
4850
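/* Program interrupt coalescing: rx_ticks/tx_ticks are given in
 * microseconds while the status-block timeout fields count in units of
 * 12 us, hence the division by 12; a resulting timeout of 0 disables
 * coalescing on that index instead.
 */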
4851static void bnx2x_update_coalesce(struct bnx2x *bp)
4852{
34f80b04 4853 int port = BP_PORT(bp);
4854 int i;
4855
4856 for_each_queue(bp, i) {
34f80b04 4857 int sb_id = bp->fp[i].sb_id;
4858
4859 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4860 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4861 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4862 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4863 bp->rx_ticks/12);
4864 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4865 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4866 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4867 (bp->rx_ticks/12) ? 0 : 1);
4868
4869 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4870 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4871 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4872 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4873 bp->tx_ticks/12);
a2fbb9ea 4874 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4875 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4876 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4877 (bp->tx_ticks/12) ? 0 : 1);
4878 }
4879}
4880
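/* Release up to 'last' pre-allocated TPA aggregation skbs of a fastpath
 * queue, unmapping any buffer still owned by the hardware
 * (BNX2X_TPA_START) before freeing it.
 */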
4881static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4882 struct bnx2x_fastpath *fp, int last)
4883{
4884 int i;
4885
4886 for (i = 0; i < last; i++) {
4887 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4888 struct sk_buff *skb = rx_buf->skb;
4889
4890 if (skb == NULL) {
4891 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4892 continue;
4893 }
4894
4895 if (fp->tpa_state[i] == BNX2X_TPA_START)
4896 pci_unmap_single(bp->pdev,
4897 pci_unmap_addr(rx_buf, mapping),
356e2385 4898 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4899
4900 dev_kfree_skb(skb);
4901 rx_buf->skb = NULL;
4902 }
4903}
4904
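/* Bring up all Rx rings: pre-allocate the per-queue TPA skb pool (falling
 * back to TPA-off on allocation failure), link the "next page" elements
 * of the SGE, BD and CQE rings, fill them with fresh buffers, and publish
 * the initial producer values to the chip.
 */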
4905static void bnx2x_init_rx_rings(struct bnx2x *bp)
4906{
7a9b2557 4907 int func = BP_FUNC(bp);
4908 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4909 ETH_MAX_AGGREGATION_QUEUES_E1H;
4910 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4911 int i, j;
a2fbb9ea 4912
87942b46 4913 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4914 DP(NETIF_MSG_IFUP,
4915 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4916
7a9b2557 4917 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4918
555f6c78 4919 for_each_rx_queue(bp, j) {
32626230 4920 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4921
32626230 4922 for (i = 0; i < max_agg_queues; i++) {
4923 fp->tpa_pool[i].skb =
4924 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4925 if (!fp->tpa_pool[i].skb) {
4926 BNX2X_ERR("Failed to allocate TPA "
4927 "skb pool for queue[%d] - "
4928 "disabling TPA on this "
4929 "queue!\n", j);
4930 bnx2x_free_tpa_pool(bp, fp, i);
4931 fp->disable_tpa = 1;
4932 break;
4933 }
4934 pci_unmap_addr_set((struct sw_rx_bd *)
4935 &bp->fp->tpa_pool[i],
4936 mapping, 0);
4937 fp->tpa_state[i] = BNX2X_TPA_STOP;
4938 }
4939 }
4940 }
4941
555f6c78 4942 for_each_rx_queue(bp, j) {
4943 struct bnx2x_fastpath *fp = &bp->fp[j];
4944
4945 fp->rx_bd_cons = 0;
4946 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4947 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4948
4949 /* Mark queue as Rx */
4950 fp->is_rx_queue = 1;
4951
4952 /* "next page" elements initialization */
4953 /* SGE ring */
4954 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4955 struct eth_rx_sge *sge;
4956
4957 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4958 sge->addr_hi =
4959 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4960 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4961 sge->addr_lo =
4962 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4963 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4964 }
4965
4966 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4967
7a9b2557 4968 /* RX BD ring */
4969 for (i = 1; i <= NUM_RX_RINGS; i++) {
4970 struct eth_rx_bd *rx_bd;
4971
4972 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4973 rx_bd->addr_hi =
4974 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4975 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4976 rx_bd->addr_lo =
4977 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4978 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4979 }
4980
34f80b04 4981 /* CQ ring */
4982 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4983 struct eth_rx_cqe_next_page *nextpg;
4984
4985 nextpg = (struct eth_rx_cqe_next_page *)
4986 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4987 nextpg->addr_hi =
4988 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4989 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4990 nextpg->addr_lo =
4991 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4992 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4993 }
4994
7a9b2557
VZ
4995 /* Allocate SGEs and initialize the ring elements */
4996 for (i = 0, ring_prod = 0;
4997 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4998
7a9b2557
VZ
4999 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5000 BNX2X_ERR("was only able to allocate "
5001 "%d rx sges\n", i);
5002 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5003 /* Cleanup already allocated elements */
5004 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5005 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5006 fp->disable_tpa = 1;
5007 ring_prod = 0;
5008 break;
5009 }
5010 ring_prod = NEXT_SGE_IDX(ring_prod);
5011 }
5012 fp->rx_sge_prod = ring_prod;
5013
5014 /* Allocate BDs and initialize BD ring */
66e855f3 5015 fp->rx_comp_cons = 0;
7a9b2557 5016 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5017 for (i = 0; i < bp->rx_ring_size; i++) {
5018 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5019 BNX2X_ERR("was only able to allocate "
de832a55
EG
5020 "%d rx skbs on queue[%d]\n", i, j);
5021 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5022 break;
5023 }
5024 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5025 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5026 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5027 }
5028
7a9b2557
VZ
5029 fp->rx_bd_prod = ring_prod;
5030 /* must not have more available CQEs than BDs */
5031 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5032 cqe_ring_prod);
a2fbb9ea
ET
5033 fp->rx_pkt = fp->rx_calls = 0;
5034
7a9b2557
VZ
5035 /* Warning!
5036 * this will generate an interrupt (to the TSTORM)
5037 * must only be done after chip is initialized
5038 */
5039 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5040 fp->rx_sge_prod);
a2fbb9ea
ET
5041 if (j != 0)
5042 continue;
5043
5044 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5045 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5046 U64_LO(fp->rx_comp_mapping));
5047 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5048 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5049 U64_HI(fp->rx_comp_mapping));
5050 }
5051}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

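	/* Tx fastpaths are laid out after the Rx ones in bp->fp[], so the
	 * context for Tx queue i lives at slot (i - num_rx_queues) of the
	 * shared context array
	 */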
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
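	/* spread RSS hash results round-robin over the Rx clients: entry i
	   maps to the leading client ID plus (i % num_rx_queues) */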
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

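		/* watermarks, in ring entries, at which the FW is expected to
		 * assert (low) and deassert (high) pause for dropless flow
		 * control
		 */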
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

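	/* skip the fixed 10-byte gzip header; if the FNAME flag (bit 3 of
	 * the FLG byte) is set, a zero-terminated original-file-name field
	 * follows and must be skipped as well (RFC 1952)
	 */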
	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

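	/* negative windowBits tells zlib to inflate raw deflate data, i.e.
	   without a zlib header/trailer (the gzip wrapper was skipped above) */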
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

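	/* emulation and FPGA platforms run much slower than real silicon,
	   so scale every polling loop below by a platform factor */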
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0;	/* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}


static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

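	/* derive the write (max payload size) and read (max read request
	 * size) orders from the PCIe Device Control register fields; an
	 * encoded field value of n corresponds to 128 << n bytes
	 */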
	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
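	/* the BRB pause thresholds below appear to be in 256-byte blocks,
	   as the 24*1024/256 = 96 and 14*1024/256 = 56 derivations suggest */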
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1=valid bit
   added at the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
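/* ILT range registers pack two line indices as (last << 10) | first;
   a single line is written as the degenerate range (x, x) */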
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
6682
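/*
 * Allocation uses a single unwind label: any failed allocation jumps to
 * alloc_mem_err, which releases everything allocated so far through
 * bnx2x_free_mem() (safe because the free macros check for NULL first).
 */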
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

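/*
 * MSI-X table layout: entry 0 is the slowpath (default status block)
 * vector, entries 1..BNX2X_NUM_QUEUES(bp) are the fastpath queue vectors;
 * each fastpath entry is programmed with its IGU vector, BP_L_ID + 1 + i.
 */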
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

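/*
 * bp->intr_sem gates the data path: bnx2x_netif_start() decrements it and
 * only re-enables NAPI, HW interrupts and the Tx queues once it reaches
 * zero while the device is running.
 */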
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

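/*
 * Ramrod completions are reported by the slowpath event handler
 * (bnx2x_sp_event()), which updates the state variable polled here; in
 * poll mode the Rx ring is serviced directly, since the completion may
 * arrive on a queue whose interrupt is not yet usable.
 */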
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must be not more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  "  defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

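/*
 * Load sequence: pick an interrupt mode, allocate queue memory, enable
 * NAPI and request IRQs, then negotiate a LOAD_REQ with the MCP to learn
 * whether common, port or function-only HW init is needed before bringing
 * up the leading and non-default connections.
 */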
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

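/*
 * Stopping the leading connection is a two-step ramrod sequence: HALT
 * (completion reported through the fastpath state) followed by
 * PORT_DELETE, whose completion is detected by watching the default
 * status block producer index advance.
 */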
static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

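/*
 * A pre-boot UNDI driver is detected by the doorbell CID offset it leaves
 * behind (DORQ_REG_NORM_CID_OFST == 7); if found, UNDI is unloaded on
 * both ports and the device is reset before this driver takes over.
 */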
34f80b04
EG
7911static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7912{
7913 u32 val;
7914
7915 /* Check if there is any driver already loaded */
7916 val = REG_RD(bp, MISC_REG_UNPREPARED);
7917 if (val == 0x1) {
7918 /* Check if it is the UNDI driver
7919 * UNDI driver initializes CID offset for normal bell to 0x7
7920 */
4a37fb66 7921 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7922 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7923 if (val == 0x7) {
7924 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7925 /* save our func */
34f80b04 7926 int func = BP_FUNC(bp);
da5a662a
VZ
7927 u32 swap_en;
7928 u32 swap_val;
34f80b04 7929
b4661739
EG
7930 /* clear the UNDI indication */
7931 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7932
34f80b04
EG
7933 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7934
7935 /* try unload UNDI on port 0 */
7936 bp->func = 0;
da5a662a
VZ
7937 bp->fw_seq =
7938 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7939 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7940 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7941
7942 /* if UNDI is loaded on the other port */
7943 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7944
da5a662a
VZ
7945 /* send "DONE" for previous unload */
7946 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7947
7948 /* unload UNDI on port 1 */
34f80b04 7949 bp->func = 1;
da5a662a
VZ
7950 bp->fw_seq =
7951 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7952 DRV_MSG_SEQ_NUMBER_MASK);
7953 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7954
7955 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7956 }
7957
b4661739
EG
7958 /* now it's safe to release the lock */
7959 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7960
f1ef27ef 7961 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7962
7963 /* close input traffic and wait for it */
7964 /* Do not rcv packets to BRB */
7965 REG_WR(bp,
7966 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7967 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7968 /* Do not direct rcv packets that are not for MCP to
7969 * the BRB */
7970 REG_WR(bp,
7971 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7972 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7973 /* clear AEU */
7974 REG_WR(bp,
7975 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7976 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7977 msleep(10);
7978
7979 /* save NIG port swap info */
7980 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7981 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7982 /* reset device */
7983 REG_WR(bp,
7984 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7985 0xd3ffffff);
34f80b04
EG
7986 REG_WR(bp,
7987 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7988 0x1403);
da5a662a
VZ
7989 /* take the NIG out of reset and restore swap values */
7990 REG_WR(bp,
7991 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7992 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7993 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7994 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7995
7996 /* send unload done to the MCP */
7997 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7998
7999 /* restore our func and fw_seq */
8000 bp->func = func;
8001 bp->fw_seq =
8002 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8003 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8004
8005 } else
8006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8007 }
8008}
8009
8010static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8011{
8012 u32 val, val2, val3, val4, id;
72ce58c3 8013 u16 pmc;
34f80b04
EG
8014
8015 /* Get the chip revision id and number. */
8016 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8017 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8018 id = ((val & 0xffff) << 16);
8019 val = REG_RD(bp, MISC_REG_CHIP_REV);
8020 id |= ((val & 0xf) << 12);
8021 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8022 id |= ((val & 0xff) << 4);
5a40e08e 8023 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8024 id |= (val & 0xf);
8025 bp->common.chip_id = id;
8026 bp->link_params.chip_id = bp->common.chip_id;
8027 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8028
1c06328c
EG
8029 val = (REG_RD(bp, 0x2874) & 0x55);
8030 if ((bp->common.chip_id & 0x1) ||
8031 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8032 bp->flags |= ONE_PORT_FLAG;
8033 BNX2X_DEV_INFO("single port device\n");
8034 }
8035
34f80b04
EG
8036 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8037 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8038 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8039 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8040 bp->common.flash_size, bp->common.flash_size);
8041
8042 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8043 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8044 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8045 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8046 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8047
8048 if (!bp->common.shmem_base ||
8049 (bp->common.shmem_base < 0xA0000) ||
8050 (bp->common.shmem_base >= 0xC0000)) {
8051 BNX2X_DEV_INFO("MCP not active\n");
8052 bp->flags |= NO_MCP_FLAG;
8053 return;
8054 }
8055
8056 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8057 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8058 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8059 BNX2X_ERR("BAD MCP validity signature\n");
8060
8061 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8062 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8063
8064 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8065 SHARED_HW_CFG_LED_MODE_MASK) >>
8066 SHARED_HW_CFG_LED_MODE_SHIFT);
8067
c2c8b03e
EG
8068 bp->link_params.feature_config_flags = 0;
8069 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8070 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8071 bp->link_params.feature_config_flags |=
8072 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8073 else
8074 bp->link_params.feature_config_flags &=
8075 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8076
34f80b04
EG
8077 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8078 bp->common.bc_ver = val;
8079 BNX2X_DEV_INFO("bc_ver %X\n", val);
8080 if (val < BNX2X_BC_VER) {
8081 /* for now only warn
8082 * later we might need to enforce this */
8083 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8084 " please upgrade BC\n", BNX2X_BC_VER, val);
8085 }
4d295db0
EG
8086 bp->link_params.feature_config_flags |=
8087 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8088 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8089
8090 if (BP_E1HVN(bp) == 0) {
8091 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8092 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8093 } else {
8094 /* no WOL capability for E1HVN != 0 */
8095 bp->flags |= NO_WOL_FLAG;
8096 }
8097 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8098 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8099
8100 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8101 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8102 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8103 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8104
8105 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8106 val, val2, val3, val4);
8107}
8108
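/*
 * bp->port.supported is seeded from the external PHY type read out of
 * NVRAM and then trimmed by speed_cap_mask below, so only link modes the
 * board configuration actually allows remain set.
 */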
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
8488 BNX2X_ERR("NVRAM config error. "
8489 "BAD link speed link_config 0x%x\n",
34f80b04 8490 bp->port.link_config);
c18487ee 8491 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8492 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8493 break;
8494 }
a2fbb9ea 8495
34f80b04
EG
8496 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8497 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8498 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8499 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8500 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8501
c18487ee 8502 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8503 " advertising 0x%x\n",
c18487ee
YR
8504 bp->link_params.req_line_speed,
8505 bp->link_params.req_duplex,
34f80b04 8506 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8507}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
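
/*
 * Example of the address assembly above (hypothetical shmem contents):
 * mac_upper = 0x00000010 and mac_lower = 0x18362afc yield the station
 * address 00:10:18:36:2a:fc - the low 16 bits of mac_upper supply
 * bytes 0-1 and mac_lower supplies bytes 2-5, most significant first.
 */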

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
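
/*
 * Note on the E1H multi-function clamp above: FUNC_MF_CFG_MAX_BW is
 * stored in units of 100 Mbps, hence the "* 100". For example
 * (hypothetical value), a bandwidth field of 25 caps the reported
 * speed at 2500 even when the physical link is up at 10G.
 */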

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
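
/*
 * Sizing example (hypothetical table entries): a reg_addrs[] block of
 * size 16 contributes 16 dwords = 64 bytes; a wreg_addrs entry of
 * size 2 with read_regs_count 3 contributes 2 * (1 + 3) = 8 dwords.
 * The dword total is converted to bytes ("*= 4") and the dump header
 * added on top - this is the length user space sees via ethtool -d.
 */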

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
9096
a2fbb9ea
ET
9097static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
9100
9101 if (bp->flags & NO_WOL_FLAG) {
9102 wol->supported = 0;
9103 wol->wolopts = 0;
9104 } else {
9105 wol->supported = WAKE_MAGIC;
9106 if (bp->wol)
9107 wol->wolopts = WAKE_MAGIC;
9108 else
9109 wol->wolopts = 0;
9110 }
9111 memset(&wol->sopass, 0, sizeof(wol->sopass));
9112}
9113
9114static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9115{
9116 struct bnx2x *bp = netdev_priv(dev);
9117
9118 if (wol->wolopts & ~WAKE_MAGIC)
9119 return -EINVAL;
9120
9121 if (wol->wolopts & WAKE_MAGIC) {
9122 if (bp->flags & NO_WOL_FLAG)
9123 return -EINVAL;
9124
9125 bp->wol = 1;
34f80b04 9126 } else
a2fbb9ea 9127 bp->wol = 0;
34f80b04 9128
a2fbb9ea
ET
9129 return 0;
9130}
9131
9132static u32 bnx2x_get_msglevel(struct net_device *dev)
9133{
9134 struct bnx2x *bp = netdev_priv(dev);
9135
9136 return bp->msglevel;
9137}
9138
9139static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9140{
9141 struct bnx2x *bp = netdev_priv(dev);
9142
9143 if (capable(CAP_NET_ADMIN))
9144 bp->msglevel = level;
9145}
9146
9147static int bnx2x_nway_reset(struct net_device *dev)
9148{
9149 struct bnx2x *bp = netdev_priv(dev);
9150
34f80b04
EG
9151 if (!bp->port.pmf)
9152 return 0;
a2fbb9ea 9153
34f80b04 9154 if (netif_running(dev)) {
bb2a0f7a 9155 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9156 bnx2x_link_set(bp);
9157 }
a2fbb9ea
ET
9158
9159 return 0;
9160}
9161
ab6ad5a4 9162static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9163{
9164 struct bnx2x *bp = netdev_priv(dev);
9165
9166 return bp->link_vars.link_up;
9167}
9168
a2fbb9ea
ET
9169static int bnx2x_get_eeprom_len(struct net_device *dev)
9170{
9171 struct bnx2x *bp = netdev_priv(dev);
9172
34f80b04 9173 return bp->common.flash_size;
a2fbb9ea
ET
9174}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
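
/*
 * Endianness illustration for the conversion above: the controller
 * hands back the dword with the first flash byte in the most
 * significant position, so cpu_to_be32() lays the bytes out in the
 * caller's buffer in flash order. E.g. the bootstrap magic stored as
 * bytes 66 99 55 aa reads back as exactly those buffer bytes, which
 * bnx2x_test_nvram() later folds back with be32_to_cpu().
 */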

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
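
/*
 * Flag sequencing example for the loop above: a 12-byte read issues
 * three dword commands flagged FIRST, then 0, then LAST, framing one
 * burst for the NVM controller; a 4-byte read never enters the loop
 * and collapses into a single FIRST|LAST command.
 */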

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
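
/*
 * Read-modify-write example (hypothetical offset): writing one byte
 * at flash offset 0x105 reads the aligned dword at 0x104, clears the
 * byte lane at BYTE_OFFSET(0x105) = 8 bits, ORs in the new byte and
 * writes the dword back, so the three neighbouring bytes survive.
 */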

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
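
/*
 * The FIRST/LAST juggling above respects NVRAM_PAGE_SIZE: LAST is
 * raised on the final dword of the buffer or when the next dword
 * would cross a page boundary, and FIRST is re-raised at the start
 * of each new page, so one user-level write becomes a series of
 * per-page bursts.
 */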

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
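
/*
 * The magic values above spell out ASCII tags: 'PHYP' = 0x50485950
 * and 'PHYR' = 0x50485952. Note that the third branch tests
 * 0x53985943, which does not match the ASCII for 'PHYC' (that would
 * be 0x50485943) and also falls outside the PMF range check above;
 * the constant is left exactly as found in the source.
 */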

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
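
/*
 * Walk-through for one reg_tbl row above: PBF_REG_MAC_IF0_ENABLE
 * (mask 0x00000001) is saved, written with 0x00000000 and then
 * 0xffffffff, and only bit 0 of the readback is compared, so
 * read-only or reserved bits cannot fail the test. offset1 (4 for
 * that row) is the per-port register stride added for port 1.
 */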

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10121}
10122
10123static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10124{
b5bf9068 10125 int rc = 0, res;
f3c87cdd
YG
10126
10127 if (!netif_running(bp->dev))
10128 return BNX2X_LOOPBACK_FAILED;
10129
f8ef6e44 10130 bnx2x_netif_stop(bp, 1);
3910c8ae 10131 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10132
b5bf9068
EG
10133 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10134 if (res) {
10135 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10136 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10137 }
10138
b5bf9068
EG
10139 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10140 if (res) {
10141 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10142 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10143 }
10144
3910c8ae 10145 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10146 bnx2x_netif_start(bp);
10147
10148 return rc;
10149}
10150
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
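
/* Illustrative sketch, not part of the driver: each nvram_tbl[] region
 * above stores its own CRC-32 in its last four bytes, so a CRC-32 run
 * over the whole region (data plus the stored CRC) yields the constant
 * CRC-32 residual 0xdebb20e3 regardless of the region's contents, which
 * is exactly what the loop above verifies.  The hypothetical helper
 * below restates that check in isolation:
 */
static inline int bnx2x_example_nvram_region_ok(const u8 *data, int size)
{
	/* size includes the trailing 4-byte CRC stored with the region */
	return (ether_crc_le(size, data) == CRC32_RESIDUAL);
}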

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
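
/* For reference: the result slots filled in above map, in order, to the
 * test names reported via ETH_SS_TEST (bnx2x_tests_str_arr), i.e.
 * buf[0] register test, buf[1] memory test, buf[2] loopback test,
 * buf[3] nvram test, buf[4] interrupt test and buf[5] link test (the
 * last one only when this function is the PMF).  A non-zero slot marks
 * the corresponding test as failed.
 */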

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
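
/* Illustrative sketch, not driver code: the 8-byte counters gathered
 * above are laid out as two consecutive 32-bit words with the high half
 * first, which is why they are read as HILO_U64(*offset, *(offset + 1)).
 * Under that assumption, a single 64-bit counter fetch reduces to:
 */
static inline u64 bnx2x_example_stat64(const u32 *offset)
{
	return HILO_U64(*offset, *(offset + 1));	/* (hi << 32) + lo */
}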

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
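
/* Note on the "== MAX_RCQ_DESC_CNT" adjustment above: the last entry of
 * each completion-queue page is a next-page pointer rather than a usable
 * CQE, so when the status-block consumer value lands on that slot it is
 * stepped over before being compared with rx_comp_cons (which the
 * NEXT_RCQ_IDX() macro advances past the same slot).
 */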

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
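
/* Worked example (hypothetical values): the stack computed a partial
 * checksum starting somewhere other than the transport header, but the
 * parsing BD expects it over the transport header.  With fix = 2 the
 * function subtracts the checksum of the 2 bytes in front of t_header
 * and re-folds:
 *
 *	csum' = ~csum_fold(csum_sub(csum, csum_partial(t_header - 2, 2, 0)))
 *
 * and the final swab16() converts the result to the byte order the HW
 * expects in the parsing BD.
 */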

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
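
/* Illustrative sketch, not driver code: xmit_type is a small flag word
 * combining checksum and GSO attributes.  A CHECKSUM_PARTIAL TCP/IPv4
 * frame with TSO enabled, for instance, yields
 * (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4), which the transmit path
 * below then tests piecewise.  A hypothetical predicate:
 */
static inline int bnx2x_example_is_tso4(u32 xmit_type)
{
	return (xmit_type & XMIT_GSO_V4) && (xmit_type & XMIT_CSUM_TCP);
}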

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
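
/* Worked example (hypothetical numbers, assuming MAX_FETCH_BD = 13 so
 * wnd_size = 10): an LSO skb with lso_mss = 1460, first_bd_sz = 200 and
 * nine 100-byte frags gives a first window sum of 200 + 9*100 = 1100.
 * That is below the MSS, i.e. the FW could be forced to fetch one MSS
 * worth of payload from more than wnd_size BDs, so the skb must be
 * linearized - exactly the case the sliding-window loop above detects.
 */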

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}
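
/* Note on the stop/re-check sequence at the end of bnx2x_start_xmit():
 * the queue is stopped first and bnx2x_tx_avail() is tested again
 * afterwards because a concurrent bnx2x_tx_int() may free descriptors
 * between the two steps; without the smp_mb() and the second check the
 * queue could remain stopped forever even though ring space has become
 * available.
 */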

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
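
/* Illustrative sketch, not driver code: the E1H multicast filter above is
 * a 256-bit hash (MC_HASH_SIZE 32-bit registers) addressed by the top
 * byte of the crc32c of the MAC address.  Assuming only crc32c_le() as
 * used above, the bit position for one address is derived like this:
 */
static inline void bnx2x_example_mc_hash_pos(const u8 *mac_addr,
					     u32 *regidx, u32 *bit)
{
	u32 crc = crc32c_le(0, mac_addr, ETH_ALEN);

	*bit = (crc >> 24) & 0xff;	/* bit index within the 256-bit hash */
	*regidx = *bit >> 5;		/* which 32-bit hash register */
	*bit &= 0x1f;			/* bit within that register */
}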

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of "
					    "bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
				    " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

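/* The firmware file stores all multi-byte values big endian; the helpers
 * below byte-swap whole arrays into host order. @n is a length in bytes.
 */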
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}

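/* Illustrative example (made-up values, not taken from a real firmware
 * file): the big-endian word pair {0x02001a40, 0x00000001} decodes to
 * op = 0x02, offset = 0x001a40, raw_data = 0x00000001.
 */
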
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

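/* Allocate a buffer for firmware section @arr, copy the section out of
 * the firmware image into bp->arr and byte-swap it with @func; jumps to
 * @lbl on allocation failure.  Relies on fw_hdr being in scope at the
 * point of use.
 */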
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
					    "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

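/**
 * bnx2x_init_firmware - load and parse the firmware file
 * @bp: driver handle
 * @dev: device to request the firmware for
 *
 * Builds the firmware file name from the chip type and the version this
 * driver was built against, requests the file from userspace, validates
 * it and sets up the init arrays and per-STORM data pointers used during
 * HW initialization.
 */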
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc, offset;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

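/**
 * bnx2x_init_one - probe callback for a single NIC
 * @pdev: Pointer to PCI device
 * @ent: matching entry in bnx2x_pci_tbl
 *
 * Allocates the net device, initializes the driver state, loads the
 * firmware and registers the net device with the stack.
 */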
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

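/**
 * bnx2x_remove_one - PCI remove callback
 * @pdev: Pointer to PCI device
 *
 * Unregisters the net device and releases every resource taken in
 * bnx2x_init_one, in reverse order.
 */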
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

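/**
 * bnx2x_suspend - PM suspend callback
 * @pdev: Pointer to PCI device
 * @state: target power state
 *
 * Saves PCI state, unloads the NIC if it is running and puts the
 * device into the power state chosen by the PCI core.
 */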
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

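/**
 * bnx2x_resume - PM resume callback
 * @pdev: Pointer to PCI device
 *
 * Restores PCI state, powers the device back to D0 and reloads the
 * NIC if it was running when the system was suspended.
 */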
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

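/* Stripped-down variant of the regular NIC unload path, used for EEH
 * error recovery: the device may no longer respond, so only host-side
 * state is torn down here (no MCP handshake and no HW reset).
 */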
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

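/* Re-read the shared-memory base and firmware sequence number after a
 * slot reset so the driver can talk to the MCP again; flags NO_MCP_FLAG
 * if the shared memory looks invalid.
 */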
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);