/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"
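
/* With, say, firmware 5.2.13.0 (version numbers here are purely
 * illustrative), FW_FILE_NAME_E1 expands to "bnx2x-e1-5.2.13.0.fw":
 * the stringified version components simply concatenate into the file
 * name that the firmware loader will be asked for.
 */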

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
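
/* These two helpers never touch the memory-mapped BAR: they tunnel GRC
 * register accesses through the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window
 * in PCI config space, which is what makes them usable at init time (see
 * the comment above).  Restoring the window to PCICFG_VENDOR_ID_OFFSET
 * afterwards keeps a stray config-space access from landing on an
 * arbitrary GRC address.
 */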

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
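
/* A note on the completion protocol used above (and mirrored in
 * bnx2x_read_dmae() below): the driver zeroes the wb_comp word in the
 * slowpath area, posts the command, then polls until the DMAE engine
 * writes DMAE_COMP_VAL back through comp_addr.  The loop gives up after
 * roughly 200 iterations instead of spinning forever, and sleeps much
 * longer per iteration on emulation/FPGA where the engine is slow.
 */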

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
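
/* To illustrate the chunking above: were len 2*DMAE_LEN32_WR_MAX + 10
 * dwords, the loop would issue two maximal-length DMAE writes and the
 * final call would move the remaining 10 dwords.  Note that offset
 * advances in bytes (hence the *4) while len counts 32-bit words.
 */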

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
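
/* XSTORM, TSTORM, CSTORM and USTORM are, to the best of our understanding,
 * the four firmware processors ("storms") of the 57710-family controllers;
 * each keeps its own assert list in internal memory, which is why the same
 * scan above is repeated four times with different BAR offsets.
 */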

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
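
/* bnx2x_int_enable() programs the HC for one of the three interrupt modes
 * this driver can run in, keyed off bp->flags: MSI-X (the INT line is
 * disabled and each queue gets its own vector), single-vector MSI, or
 * legacy INTx.  In the INTx case the MSI/MSI-X enable bit is written once
 * and then stripped from val, so the second REG_WR leaves only the INTx
 * path armed.
 */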

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
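
/* BD bookkeeping above: nbd (taken from the start BD) covers the start BD,
 * a parse BD, an optional TSO split-header BD and one BD per page fragment.
 * Only the start BD and the fragment BDs carry DMA mappings, which is why
 * the parse and split-header BDs are stepped over without an unmap.
 */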

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
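
/* A sketch of the arithmetic above: with prod == cons (an empty ring),
 * used collapses to NUM_TX_RINGS, so the function reports tx_ring_size
 * minus the "next page" BDs, which can never hold packets.  SUB_S16()
 * keeps the subtraction sane once prod has wrapped the 16-bit index
 * space past cons.
 */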

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
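
/* The SGE mask is a bitmap with one bit per SGE ring entry, kept in u64
 * words.  bnx2x_update_sge_prod() clears the bits of the pages the
 * firmware just consumed, then advances rx_sge_prod only across mask
 * words that have gone fully to zero, re-arming each such word to
 * all-ones as it passes; the two "next page" bits per page are cleared
 * up front so they never stall that walk.
 */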

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
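
/* TPA ("transparent packet aggregation" -- the hardware LRO flavour that
 * the disable_tpa module parameter controls) keeps one skb per
 * aggregation bin in fp->tpa_pool.  bnx2x_tpa_start() parks the
 * partially-received skb in its bin, and bnx2x_tpa_stop() completes it:
 * redo the IP header checksum, attach the SGE pages through
 * bnx2x_fill_frag_skb(), and hand the aggregate to GRO.
 */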

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			prefetch((u8 *)skb + 256);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_gro_receive(&fp->napi, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
		else
#endif
			napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1738
1739static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1740{
1741 struct bnx2x_fastpath *fp = fp_cookie;
1742 struct bnx2x *bp = fp->bp;
a2fbb9ea 1743
da5a662a
VZ
1744 /* Return here if interrupt is disabled */
1745 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1746 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1747 return IRQ_HANDLED;
1748 }
1749
34f80b04 1750 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1751 fp->index, fp->sb_id);
0626b899 1752 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1753
1754#ifdef BNX2X_STOP_ON_ERROR
1755 if (unlikely(bp->panic))
1756 return IRQ_HANDLED;
1757#endif
ca00392c 1758
54b9ddaa
VZ
1759 /* Handle Rx and Tx according to MSI-X vector */
1760 prefetch(fp->rx_cons_sb);
1761 prefetch(fp->tx_cons_sb);
1762 prefetch(&fp->status_blk->u_status_block.status_block_index);
1763 prefetch(&fp->status_blk->c_status_block.status_block_index);
1764 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1765
a2fbb9ea
ET
1766 return IRQ_HANDLED;
1767}
1768
1769static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1770{
555f6c78 1771 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1772 u16 status = bnx2x_ack_int(bp);
34f80b04 1773 u16 mask;
ca00392c 1774 int i;
a2fbb9ea 1775
34f80b04 1776 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1777 if (unlikely(status == 0)) {
1778 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1779 return IRQ_NONE;
1780 }
f5372251 1781 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1782
34f80b04 1783 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1784 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1785 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1786 return IRQ_HANDLED;
1787 }
1788
3196a88a
EG
1789#ifdef BNX2X_STOP_ON_ERROR
1790 if (unlikely(bp->panic))
1791 return IRQ_HANDLED;
1792#endif
1793
ca00392c
EG
1794 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1795 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1796
ca00392c
EG
1797 mask = 0x2 << fp->sb_id;
1798 if (status & mask) {
54b9ddaa
VZ
1799 /* Handle Rx and Tx according to SB id */
1800 prefetch(fp->rx_cons_sb);
1801 prefetch(&fp->status_blk->u_status_block.
1802 status_block_index);
1803 prefetch(fp->tx_cons_sb);
1804 prefetch(&fp->status_blk->c_status_block.
1805 status_block_index);
1806 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1807 status &= ~mask;
1808 }
a2fbb9ea
ET
1809 }
1810
993ac7b5
MC
1811#ifdef BCM_CNIC
1812 mask = 0x2 << CNIC_SB_ID(bp);
1813 if (status & (mask | 0x1)) {
1814 struct cnic_ops *c_ops = NULL;
1815
1816 rcu_read_lock();
1817 c_ops = rcu_dereference(bp->cnic_ops);
1818 if (c_ops)
1819 c_ops->cnic_handler(bp->cnic_data, NULL);
1820 rcu_read_unlock();
1821
1822 status &= ~mask;
1823 }
1824#endif
a2fbb9ea 1825
34f80b04 1826 if (unlikely(status & 0x1)) {
1cf167f2 1827 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1828
1829 status &= ~0x1;
1830 if (!status)
1831 return IRQ_HANDLED;
1832 }
1833
34f80b04
EG
1834 if (status)
1835 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1836 status);
a2fbb9ea 1837
c18487ee 1838 return IRQ_HANDLED;
a2fbb9ea
ET
1839}
1840
c18487ee 1841/* end of fast path */
a2fbb9ea 1842
bb2a0f7a 1843static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1844
c18487ee
YR
1845/* Link */
1846
1847/*
1848 * General service functions
1849 */
a2fbb9ea 1850
4a37fb66 1851static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1852{
1853 u32 lock_status;
1854 u32 resource_bit = (1 << resource);
4a37fb66
YG
1855 int func = BP_FUNC(bp);
1856 u32 hw_lock_control_reg;
c18487ee 1857 int cnt;
a2fbb9ea 1858
c18487ee
YR
1859 /* Validating that the resource is within range */
1860 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1861 DP(NETIF_MSG_HW,
1862 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1863 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1864 return -EINVAL;
1865 }
a2fbb9ea 1866
4a37fb66
YG
1867 if (func <= 5) {
1868 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1869 } else {
1870 hw_lock_control_reg =
1871 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1872 }
1873
c18487ee 1874 /* Validating that the resource is not already taken */
4a37fb66 1875 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1876 if (lock_status & resource_bit) {
1877 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1878 lock_status, resource_bit);
1879 return -EEXIST;
1880 }
a2fbb9ea 1881
46230476
EG
1882 /* Try for 5 seconds, polling every 5ms */
1883 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1884 /* Try to acquire the lock */
4a37fb66
YG
1885 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1886 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1887 if (lock_status & resource_bit)
1888 return 0;
a2fbb9ea 1889
c18487ee 1890 msleep(5);
a2fbb9ea 1891 }
c18487ee
YR
1892 DP(NETIF_MSG_HW, "Timeout\n");
1893 return -EAGAIN;
1894}
a2fbb9ea 1895
4a37fb66 1896static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1897{
1898 u32 lock_status;
1899 u32 resource_bit = (1 << resource);
4a37fb66
YG
1900 int func = BP_FUNC(bp);
1901 u32 hw_lock_control_reg;
a2fbb9ea 1902
c18487ee
YR
1903 /* Validating that the resource is within range */
1904 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1905 DP(NETIF_MSG_HW,
1906 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1907 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1908 return -EINVAL;
1909 }
1910
4a37fb66
YG
1911 if (func <= 5) {
1912 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1913 } else {
1914 hw_lock_control_reg =
1915 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1916 }
1917
c18487ee 1918 /* Validating that the resource is currently taken */
4a37fb66 1919 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1920 if (!(lock_status & resource_bit)) {
1921 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1922 lock_status, resource_bit);
1923 return -EFAULT;
a2fbb9ea
ET
1924 }
1925
4a37fb66 1926 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1927 return 0;
1928}
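/*
 * Usage sketch (illustrative only): callers bracket an access to a
 * shared resource with the acquire/release pair, as bnx2x_set_spio()
 * below does:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	... read-modify-write MISC_REG_SPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *
 * The acquire side polls up to 1000 times with msleep(5) in between,
 * i.e. a budget of roughly 5 seconds before it gives up with -EAGAIN.
 */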
1929
1930/* HW Lock for shared dual port PHYs */
4a37fb66 1931static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1932{
34f80b04 1933 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1934
46c6a674
EG
1935 if (bp->port.need_hw_lock)
1936 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1937}
a2fbb9ea 1938
4a37fb66 1939static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1940{
46c6a674
EG
1941 if (bp->port.need_hw_lock)
1942 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1943
34f80b04 1944 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1945}
a2fbb9ea 1946
4acac6a5
EG
1947int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1948{
1949 /* The GPIO should be swapped if swap register is set and active */
1950 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1951 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1952 int gpio_shift = gpio_num +
1953 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954 u32 gpio_mask = (1 << gpio_shift);
1955 u32 gpio_reg;
1956 int value;
1957
1958 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1959 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1960 return -EINVAL;
1961 }
1962
1963 /* read GPIO value */
1964 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1965
1966 /* get the requested pin value */
1967 if ((gpio_reg & gpio_mask) == gpio_mask)
1968 value = 1;
1969 else
1970 value = 0;
1971
1972 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1973
1974 return value;
1975}
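/*
 * Worked example (illustrative): with the NIG port-swap strap
 * inactive, gpio_port equals the caller's port, so GPIO 1 on port 0
 * is read at bit 1 of MISC_REG_GPIO, while on port 1 it sits at bit
 * (1 + MISC_REGISTERS_GPIO_PORT_SHIFT).  When both NIG_REG_PORT_SWAP
 * and NIG_REG_STRAP_OVERRIDE are set, the XOR flips the port first.
 */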
1976
17de50b7 1977int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1978{
1979 /* The GPIO should be swapped if swap register is set and active */
1980 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1981 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1982 int gpio_shift = gpio_num +
1983 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1984 u32 gpio_mask = (1 << gpio_shift);
1985 u32 gpio_reg;
a2fbb9ea 1986
c18487ee
YR
1987 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1988 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1989 return -EINVAL;
1990 }
a2fbb9ea 1991
4a37fb66 1992 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1993 /* read GPIO and mask except the float bits */
1994 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1995
c18487ee
YR
1996 switch (mode) {
1997 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1998 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1999 gpio_num, gpio_shift);
2000 /* clear FLOAT and set CLR */
2001 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2002 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2003 break;
a2fbb9ea 2004
c18487ee
YR
2005 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2006 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2007 gpio_num, gpio_shift);
2008 /* clear FLOAT and set SET */
2009 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2010 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2011 break;
a2fbb9ea 2012
17de50b7 2013 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2014 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2015 gpio_num, gpio_shift);
2016 /* set FLOAT */
2017 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2018 break;
a2fbb9ea 2019
c18487ee
YR
2020 default:
2021 break;
a2fbb9ea
ET
2022 }
2023
c18487ee 2024 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2025 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2026
c18487ee 2027 return 0;
a2fbb9ea
ET
2028}
2029
4acac6a5
EG
2030int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2031{
2032 /* The GPIO should be swapped if swap register is set and active */
2033 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2034 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2035 int gpio_shift = gpio_num +
2036 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2037 u32 gpio_mask = (1 << gpio_shift);
2038 u32 gpio_reg;
2039
2040 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2041 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2042 return -EINVAL;
2043 }
2044
2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2046 /* read GPIO int */
2047 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2048
2049 switch (mode) {
2050 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2051 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2052 "output low\n", gpio_num, gpio_shift);
2053 /* clear SET and set CLR */
2054 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2056 break;
2057
2058 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2059 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2060 "output high\n", gpio_num, gpio_shift);
2061 /* clear CLR and set SET */
2062 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2063 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2064 break;
2065
2066 default:
2067 break;
2068 }
2069
2070 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2071 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2072
2073 return 0;
2074}
2075
c18487ee 2076static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2077{
c18487ee
YR
2078 u32 spio_mask = (1 << spio_num);
2079 u32 spio_reg;
a2fbb9ea 2080
c18487ee
YR
2081 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2082 (spio_num > MISC_REGISTERS_SPIO_7)) {
2083 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2084 return -EINVAL;
a2fbb9ea
ET
2085 }
2086
4a37fb66 2087 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2088 /* read SPIO and mask except the float bits */
2089 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2090
c18487ee 2091 switch (mode) {
6378c025 2092 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2093 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2094 /* clear FLOAT and set CLR */
2095 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2096 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2097 break;
a2fbb9ea 2098
6378c025 2099 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2100 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2101 /* clear FLOAT and set SET */
2102 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2103 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2104 break;
a2fbb9ea 2105
c18487ee
YR
2106 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2107 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2108 /* set FLOAT */
2109 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2110 break;
a2fbb9ea 2111
c18487ee
YR
2112 default:
2113 break;
a2fbb9ea
ET
2114 }
2115
c18487ee 2116 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2117 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2118
a2fbb9ea
ET
2119 return 0;
2120}
2121
c18487ee 2122static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2123{
ad33ea3a
EG
2124 switch (bp->link_vars.ieee_fc &
2125 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2126 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2127 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2128 ADVERTISED_Pause);
2129 break;
356e2385 2130
c18487ee 2131 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2132 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2133 ADVERTISED_Pause);
2134 break;
356e2385 2135
c18487ee 2136 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2137 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2138 break;
356e2385 2139
c18487ee 2140 default:
34f80b04 2141 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2142 ADVERTISED_Pause);
2143 break;
2144 }
2145}
f1410647 2146
c18487ee
YR
2147static void bnx2x_link_report(struct bnx2x *bp)
2148{
f34d28ea 2149 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2150 netif_carrier_off(bp->dev);
7995c64e 2151 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2152 return;
2153 }
2154
c18487ee 2155 if (bp->link_vars.link_up) {
35c5f8fe
EG
2156 u16 line_speed;
2157
c18487ee
YR
2158 if (bp->state == BNX2X_STATE_OPEN)
2159 netif_carrier_on(bp->dev);
7995c64e 2160 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2161
35c5f8fe
EG
2162 line_speed = bp->link_vars.line_speed;
2163 if (IS_E1HMF(bp)) {
2164 u16 vn_max_rate;
2165
2166 vn_max_rate =
2167 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2168 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2169 if (vn_max_rate < line_speed)
2170 line_speed = vn_max_rate;
2171 }
7995c64e 2172 pr_cont("%d Mbps ", line_speed);
f1410647 2173
c18487ee 2174 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2175 pr_cont("full duplex");
c18487ee 2176 else
7995c64e 2177 pr_cont("half duplex");
f1410647 2178
c0700f90
DM
2179 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2180 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2181 pr_cont(", receive ");
356e2385
EG
2182 if (bp->link_vars.flow_ctrl &
2183 BNX2X_FLOW_CTRL_TX)
7995c64e 2184 pr_cont("& transmit ");
c18487ee 2185 } else {
7995c64e 2186 pr_cont(", transmit ");
c18487ee 2187 }
7995c64e 2188 pr_cont("flow control ON");
c18487ee 2189 }
7995c64e 2190 pr_cont("\n");
f1410647 2191
c18487ee
YR
2192 } else { /* link_down */
2193 netif_carrier_off(bp->dev);
7995c64e 2194 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2195 }
c18487ee
YR
2196}
2197
b5bf9068 2198static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2199{
19680c48
EG
2200 if (!BP_NOMCP(bp)) {
2201 u8 rc;
a2fbb9ea 2202
19680c48 2203 /* Initialize link parameters structure variables */
8c99e7b0
YR
2204 /* It is recommended to turn off RX FC for jumbo frames
2205 for better performance */
0c593270 2206 if (bp->dev->mtu > 5000)
c0700f90 2207 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2208 else
c0700f90 2209 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2210
4a37fb66 2211 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2212
2213 if (load_mode == LOAD_DIAG)
2214 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2215
19680c48 2216 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2217
4a37fb66 2218 bnx2x_release_phy_lock(bp);
a2fbb9ea 2219
3c96c68b
EG
2220 bnx2x_calc_fc_adv(bp);
2221
b5bf9068
EG
2222 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2223 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2224 bnx2x_link_report(bp);
b5bf9068 2225 }
34f80b04 2226
19680c48
EG
2227 return rc;
2228 }
f5372251 2229 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2230 return -EINVAL;
a2fbb9ea
ET
2231}
2232
c18487ee 2233static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2234{
19680c48 2235 if (!BP_NOMCP(bp)) {
4a37fb66 2236 bnx2x_acquire_phy_lock(bp);
19680c48 2237 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2238 bnx2x_release_phy_lock(bp);
a2fbb9ea 2239
19680c48
EG
2240 bnx2x_calc_fc_adv(bp);
2241 } else
f5372251 2242 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2243}
a2fbb9ea 2244
c18487ee
YR
2245static void bnx2x__link_reset(struct bnx2x *bp)
2246{
19680c48 2247 if (!BP_NOMCP(bp)) {
4a37fb66 2248 bnx2x_acquire_phy_lock(bp);
589abe3a 2249 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2250 bnx2x_release_phy_lock(bp);
19680c48 2251 } else
f5372251 2252 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2253}
a2fbb9ea 2254
c18487ee
YR
2255static u8 bnx2x_link_test(struct bnx2x *bp)
2256{
2257 u8 rc;
a2fbb9ea 2258
4a37fb66 2259 bnx2x_acquire_phy_lock(bp);
c18487ee 2260 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2261 bnx2x_release_phy_lock(bp);
a2fbb9ea 2262
c18487ee
YR
2263 return rc;
2264}
a2fbb9ea 2265
8a1c38d1 2266static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2267{
8a1c38d1
EG
2268 u32 r_param = bp->link_vars.line_speed / 8;
2269 u32 fair_periodic_timeout_usec;
2270 u32 t_fair;
34f80b04 2271
8a1c38d1
EG
2272 memset(&(bp->cmng.rs_vars), 0,
2273 sizeof(struct rate_shaping_vars_per_port));
2274 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2275
8a1c38d1
EG
2276 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2277 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2278
8a1c38d1
EG
2279 /* this is the threshold below which no timer arming will occur;
2280 the 1.25 coefficient makes the threshold a little bigger
2281 than the real time, to compensate for timer inaccuracy */
2282 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2283 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2284
8a1c38d1
EG
2285 /* resolution of fairness timer */
2286 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2287 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2288 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2289
8a1c38d1
EG
2290 /* this is the threshold below which we won't arm the timer anymore */
2291 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2292
8a1c38d1
EG
2293 /* we multiply by 1e3/8 to get bytes/msec.
2294 We don't want the credits to exceed a credit
2295 of t_fair*FAIR_MEM (the algorithm resolution) */
2296 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2297 /* since each tick is 4 usec */
2298 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2299}
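/*
 * Worked example (illustrative, assuming RS_PERIODIC_TIMEOUT_USEC is
 * 100 as the tick comment above implies): on a 10G link, r_param =
 * 10000 / 8 = 1250 bytes/usec, so rs_threshold = (100 * 1250 * 5) / 4
 * = 156250 bytes, i.e. 1.25 times the traffic of one period, and
 * t_fair = T_FAIR_COEF / 10000 gives the 1000 usec fairness period
 * mentioned above.
 */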
2300
2691d51d
EG
2301/* Calculates the sum of vn_min_rates.
2302 It's needed for further normalizing of the min_rates.
2303 Returns:
2304 sum of vn_min_rates.
2305 or
2306 0 - if all the min_rates are 0.
2307 In the latter case the fairness algorithm should be deactivated.
2308 If not all min_rates are zero then those that are zero will be set to 1.
2309 */
2310static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2311{
2312 int all_zero = 1;
2313 int port = BP_PORT(bp);
2314 int vn;
2315
2316 bp->vn_weight_sum = 0;
2317 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2318 int func = 2*vn + port;
2319 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2320 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2321 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322
2323 /* Skip hidden vns */
2324 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2325 continue;
2326
2327 /* If min rate is zero - set it to 1 */
2328 if (!vn_min_rate)
2329 vn_min_rate = DEF_MIN_RATE;
2330 else
2331 all_zero = 0;
2332
2333 bp->vn_weight_sum += vn_min_rate;
2334 }
2335
2336 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
2337 if (all_zero) {
2338 bp->cmng.flags.cmng_enables &=
2339 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2340 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2341 " fairness will be disabled\n");
2342 } else
2343 bp->cmng.flags.cmng_enables |=
2344 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2345}
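/*
 * Example (illustrative): configured min rates of {0, 20, 30, 0}
 * (in units of 100 Mbps) contribute 2000 and 3000 from the non-zero
 * vns, the zero entries are bumped to DEF_MIN_RATE, and fairness
 * stays enabled because not every rate was zero.  Only when all
 * non-hidden vns are zero is CMNG_FLAGS_PER_PORT_FAIRNESS_VN cleared.
 */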
2346
8a1c38d1 2347static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2348{
2349 struct rate_shaping_vars_per_vn m_rs_vn;
2350 struct fairness_vars_per_vn m_fair_vn;
2351 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2352 u16 vn_min_rate, vn_max_rate;
2353 int i;
2354
2355 /* If function is hidden - set min and max to zeroes */
2356 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2357 vn_min_rate = 0;
2358 vn_max_rate = 0;
2359
2360 } else {
2361 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2362 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2363 /* If min rate is zero - set it to 1 */
2364 if (!vn_min_rate)
34f80b04
EG
2365 vn_min_rate = DEF_MIN_RATE;
2366 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2367 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2368 }
8a1c38d1 2369 DP(NETIF_MSG_IFUP,
b015e3d1 2370 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2371 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2372
2373 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2374 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2375
2376 /* global vn counter - maximal Mbps for this vn */
2377 m_rs_vn.vn_counter.rate = vn_max_rate;
2378
2379 /* quota - number of bytes transmitted in this period */
2380 m_rs_vn.vn_counter.quota =
2381 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2382
8a1c38d1 2383 if (bp->vn_weight_sum) {
34f80b04
EG
2384 /* credit for each period of the fairness algorithm:
2385 number of bytes in T_FAIR (the vn's share of the port rate).
8a1c38d1
EG
2386 vn_weight_sum should not be larger than 10000, thus
2387 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2388 than zero */
34f80b04 2389 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2390 max((u32)(vn_min_rate * (T_FAIR_COEF /
2391 (8 * bp->vn_weight_sum))),
2392 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2393 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2394 m_fair_vn.vn_credit_delta);
2395 }
2396
34f80b04
EG
2397 /* Store it to internal memory */
2398 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2399 REG_WR(bp, BAR_XSTRORM_INTMEM +
2400 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2401 ((u32 *)(&m_rs_vn))[i]);
2402
2403 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2404 REG_WR(bp, BAR_XSTRORM_INTMEM +
2405 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2406 ((u32 *)(&m_fair_vn))[i]);
2407}
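/*
 * Worked example (illustrative, again assuming a 100 usec rate-shaping
 * period): a vn capped at vn_max_rate = 2500 Mbps receives a quota of
 * 2500 * 100 / 8 = 31250 bytes per period.
 */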
2408
8a1c38d1 2409
c18487ee
YR
2410/* This function is called upon link interrupt */
2411static void bnx2x_link_attn(struct bnx2x *bp)
2412{
bb2a0f7a
YG
2413 /* Make sure that we are synced with the current statistics */
2414 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2415
c18487ee 2416 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2417
bb2a0f7a
YG
2418 if (bp->link_vars.link_up) {
2419
1c06328c 2420 /* dropless flow control */
a18f5128 2421 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2422 int port = BP_PORT(bp);
2423 u32 pause_enabled = 0;
2424
2425 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2426 pause_enabled = 1;
2427
2428 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2429 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2430 pause_enabled);
2431 }
2432
bb2a0f7a
YG
2433 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2434 struct host_port_stats *pstats;
2435
2436 pstats = bnx2x_sp(bp, port_stats);
2437 /* reset old bmac stats */
2438 memset(&(pstats->mac_stx[0]), 0,
2439 sizeof(struct mac_stx));
2440 }
f34d28ea 2441 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2442 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2443 }
2444
c18487ee
YR
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
34f80b04
EG
2447
2448 if (IS_E1HMF(bp)) {
8a1c38d1 2449 int port = BP_PORT(bp);
34f80b04 2450 int func;
8a1c38d1 2451 int vn;
34f80b04 2452
ab6ad5a4 2453 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2454 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2455 if (vn == BP_E1HVN(bp))
2456 continue;
2457
8a1c38d1 2458 func = ((vn << 1) | port);
34f80b04
EG
2459 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2460 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2461 }
34f80b04 2462
8a1c38d1
EG
2463 if (bp->link_vars.link_up) {
2464 int i;
2465
2466 /* Init rate shaping and fairness contexts */
2467 bnx2x_init_port_minmax(bp);
34f80b04 2468
34f80b04 2469 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2470 bnx2x_init_vn_minmax(bp, 2*vn + port);
2471
2472 /* Store it to internal memory */
2473 for (i = 0;
2474 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2475 REG_WR(bp, BAR_XSTRORM_INTMEM +
2476 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2477 ((u32 *)(&bp->cmng))[i]);
2478 }
34f80b04 2479 }
c18487ee 2480}
a2fbb9ea 2481
c18487ee
YR
2482static void bnx2x__link_status_update(struct bnx2x *bp)
2483{
f34d28ea 2484 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2485 return;
a2fbb9ea 2486
c18487ee 2487 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2488
bb2a0f7a
YG
2489 if (bp->link_vars.link_up)
2490 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2491 else
2492 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2493
2691d51d
EG
2494 bnx2x_calc_vn_weight_sum(bp);
2495
c18487ee
YR
2496 /* indicate link status */
2497 bnx2x_link_report(bp);
a2fbb9ea 2498}
a2fbb9ea 2499
34f80b04
EG
2500static void bnx2x_pmf_update(struct bnx2x *bp)
2501{
2502 int port = BP_PORT(bp);
2503 u32 val;
2504
2505 bp->port.pmf = 1;
2506 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2507
2508 /* enable nig attention */
2509 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2510 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2511 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2512
2513 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2514}
2515
c18487ee 2516/* end of Link */
a2fbb9ea
ET
2517
2518/* slow path */
2519
2520/*
2521 * General service functions
2522 */
2523
2691d51d
EG
2524/* send the MCP a request, block until there is a reply */
2525u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2526{
2527 int func = BP_FUNC(bp);
2528 u32 seq = ++bp->fw_seq;
2529 u32 rc = 0;
2530 u32 cnt = 1;
2531 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2532
c4ff7cbf 2533 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2534 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2535 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2536
2537 do {
2538 /* let the FW do its magic ... */
2539 msleep(delay);
2540
2541 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2542
c4ff7cbf
EG
2543 /* Give the FW up to 5 seconds (500*10ms) */
2544 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2545
2546 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2547 cnt*delay, rc, seq);
2548
2549 /* is this a reply to our command? */
2550 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2551 rc &= FW_MSG_CODE_MASK;
2552 else {
2553 /* FW BUG! */
2554 BNX2X_ERR("FW failed to respond!\n");
2555 bnx2x_fw_dump(bp);
2556 rc = 0;
2557 }
c4ff7cbf 2558 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2559
2560 return rc;
2561}
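/*
 * Timing sketch (illustrative): on production silicon delay is 10 ms,
 * so the polling loop above gives the MCP up to 500 * 10 ms = 5 s to
 * echo the sequence number; on slow emulation parts (delay = 100 ms)
 * the same 500 iterations stretch to about 50 s.
 */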
2562
2563static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2564static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2565static void bnx2x_set_rx_mode(struct net_device *dev);
2566
2567static void bnx2x_e1h_disable(struct bnx2x *bp)
2568{
2569 int port = BP_PORT(bp);
2691d51d
EG
2570
2571 netif_tx_disable(bp->dev);
2691d51d
EG
2572
2573 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2574
2691d51d
EG
2575 netif_carrier_off(bp->dev);
2576}
2577
2578static void bnx2x_e1h_enable(struct bnx2x *bp)
2579{
2580 int port = BP_PORT(bp);
2581
2582 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2583
2691d51d
EG
2584 /* Only the Tx queues should be re-enabled */
2585 netif_tx_wake_all_queues(bp->dev);
2586
061bc702
EG
2587 /*
2588 * Do not call netif_carrier_on here; it will be called when the
2589 * link state is checked and the link is found to be up
2590 */
2691d51d
EG
2591}
2592
2593static void bnx2x_update_min_max(struct bnx2x *bp)
2594{
2595 int port = BP_PORT(bp);
2596 int vn, i;
2597
2598 /* Init rate shaping and fairness contexts */
2599 bnx2x_init_port_minmax(bp);
2600
2601 bnx2x_calc_vn_weight_sum(bp);
2602
2603 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2604 bnx2x_init_vn_minmax(bp, 2*vn + port);
2605
2606 if (bp->port.pmf) {
2607 int func;
2608
2609 /* Set the attention towards other drivers on the same port */
2610 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2611 if (vn == BP_E1HVN(bp))
2612 continue;
2613
2614 func = ((vn << 1) | port);
2615 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2616 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2617 }
2618
2619 /* Store it to internal memory */
2620 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2621 REG_WR(bp, BAR_XSTRORM_INTMEM +
2622 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2623 ((u32 *)(&bp->cmng))[i]);
2624 }
2625}
2626
2627static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2628{
2691d51d 2629 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2630
2631 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2632
f34d28ea
EG
2633 /*
2634 * This is the only place besides function initialization
2635 * where bp->flags can change, so it is done without any
2636 * locks
2637 */
2691d51d
EG
2638 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2639 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2640 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2641
2642 bnx2x_e1h_disable(bp);
2643 } else {
2644 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2645 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2646
2647 bnx2x_e1h_enable(bp);
2648 }
2649 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2650 }
2651 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2652
2653 bnx2x_update_min_max(bp);
2654 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2655 }
2656
2657 /* Report results to MCP */
2658 if (dcc_event)
2659 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2660 else
2661 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2662}
2663
28912902
MC
2664/* must be called under the spq lock */
2665static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2666{
2667 struct eth_spe *next_spe = bp->spq_prod_bd;
2668
2669 if (bp->spq_prod_bd == bp->spq_last_bd) {
2670 bp->spq_prod_bd = bp->spq;
2671 bp->spq_prod_idx = 0;
2672 DP(NETIF_MSG_TIMER, "end of spq\n");
2673 } else {
2674 bp->spq_prod_bd++;
2675 bp->spq_prod_idx++;
2676 }
2677 return next_spe;
2678}
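/*
 * Ring-wrap example (illustrative): the producer hands out spq[0],
 * spq[1], ... in order; when the element being returned is
 * spq_last_bd, the producer pointer and spq_prod_idx are reset so the
 * next post lands back on spq[0], giving a simple circular buffer.
 */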
2679
2680/* must be called under the spq lock */
2681static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2682{
2683 int func = BP_FUNC(bp);
2684
2685 /* Make sure that BD data is updated before writing the producer */
2686 wmb();
2687
2688 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2689 bp->spq_prod_idx);
2690 mmiowb();
2691}
2692
a2fbb9ea
ET
2693/* the slow path queue is odd since completions arrive on the fastpath ring */
2694static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2695 u32 data_hi, u32 data_lo, int common)
2696{
28912902 2697 struct eth_spe *spe;
a2fbb9ea 2698
34f80b04
EG
2699 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2700 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2701 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2702 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2703 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2704
2705#ifdef BNX2X_STOP_ON_ERROR
2706 if (unlikely(bp->panic))
2707 return -EIO;
2708#endif
2709
34f80b04 2710 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2711
2712 if (!bp->spq_left) {
2713 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2714 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2715 bnx2x_panic();
2716 return -EBUSY;
2717 }
f1410647 2718
28912902
MC
2719 spe = bnx2x_sp_get_next(bp);
2720
a2fbb9ea 2721 /* CID needs the port number to be encoded in it */
28912902 2722 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2723 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2724 HW_CID(bp, cid)));
28912902 2725 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2726 if (common)
28912902 2727 spe->hdr.type |=
a2fbb9ea
ET
2728 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2729
28912902
MC
2730 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2731 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2732
2733 bp->spq_left--;
2734
28912902 2735 bnx2x_sp_prod_update(bp);
34f80b04 2736 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2737 return 0;
2738}
2739
2740/* acquire split MCP access lock register */
4a37fb66 2741static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2742{
a2fbb9ea 2743 u32 i, j, val;
34f80b04 2744 int rc = 0;
a2fbb9ea
ET
2745
2746 might_sleep();
2747 i = 100;
2748 for (j = 0; j < i*10; j++) {
2749 val = (1UL << 31);
2750 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2751 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2752 if (val & (1L << 31))
2753 break;
2754
2755 msleep(5);
2756 }
a2fbb9ea 2757 if (!(val & (1L << 31))) {
19680c48 2758 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2759 rc = -EBUSY;
2760 }
2761
2762 return rc;
2763}
2764
4a37fb66
YG
2765/* release split MCP access lock register */
2766static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2767{
2768 u32 val = 0;
2769
2770 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2771}
2772
2773static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2774{
2775 struct host_def_status_block *def_sb = bp->def_status_blk;
2776 u16 rc = 0;
2777
2778 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2779 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2780 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2781 rc |= 1;
2782 }
2783 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2784 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2785 rc |= 2;
2786 }
2787 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2788 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2789 rc |= 4;
2790 }
2791 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2792 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2793 rc |= 8;
2794 }
2795 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2796 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2797 rc |= 16;
2798 }
2799 return rc;
2800}
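/*
 * Example (illustrative): a return value of 0x11 means the attention
 * index (bit 0) and the TSTORM index (bit 4) both moved since the
 * last pass, while 0 means the default status block is unchanged and
 * there is nothing for the slowpath task to do.
 */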
2801
2802/*
2803 * slow path service functions
2804 */
2805
2806static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2807{
34f80b04 2808 int port = BP_PORT(bp);
5c862848
EG
2809 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2810 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2811 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2812 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2813 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2814 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2815 u32 aeu_mask;
87942b46 2816 u32 nig_mask = 0;
a2fbb9ea 2817
a2fbb9ea
ET
2818 if (bp->attn_state & asserted)
2819 BNX2X_ERR("IGU ERROR\n");
2820
3fcaf2e5
EG
2821 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2822 aeu_mask = REG_RD(bp, aeu_addr);
2823
a2fbb9ea 2824 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2825 aeu_mask, asserted);
2826 aeu_mask &= ~(asserted & 0xff);
2827 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2828
3fcaf2e5
EG
2829 REG_WR(bp, aeu_addr, aeu_mask);
2830 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2831
3fcaf2e5 2832 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2833 bp->attn_state |= asserted;
3fcaf2e5 2834 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2835
2836 if (asserted & ATTN_HARD_WIRED_MASK) {
2837 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2838
a5e9a7cf
EG
2839 bnx2x_acquire_phy_lock(bp);
2840
877e9aa4 2841 /* save nig interrupt mask */
87942b46 2842 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2843 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2844
c18487ee 2845 bnx2x_link_attn(bp);
a2fbb9ea
ET
2846
2847 /* handle unicore attn? */
2848 }
2849 if (asserted & ATTN_SW_TIMER_4_FUNC)
2850 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2851
2852 if (asserted & GPIO_2_FUNC)
2853 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2854
2855 if (asserted & GPIO_3_FUNC)
2856 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2857
2858 if (asserted & GPIO_4_FUNC)
2859 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2860
2861 if (port == 0) {
2862 if (asserted & ATTN_GENERAL_ATTN_1) {
2863 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2864 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2865 }
2866 if (asserted & ATTN_GENERAL_ATTN_2) {
2867 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2868 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2869 }
2870 if (asserted & ATTN_GENERAL_ATTN_3) {
2871 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2872 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2873 }
2874 } else {
2875 if (asserted & ATTN_GENERAL_ATTN_4) {
2876 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2877 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2878 }
2879 if (asserted & ATTN_GENERAL_ATTN_5) {
2880 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2881 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2882 }
2883 if (asserted & ATTN_GENERAL_ATTN_6) {
2884 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2885 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2886 }
2887 }
2888
2889 } /* if hardwired */
2890
5c862848
EG
2891 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2892 asserted, hc_addr);
2893 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2894
2895 /* now set back the mask */
a5e9a7cf 2896 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2897 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2898 bnx2x_release_phy_lock(bp);
2899 }
a2fbb9ea
ET
2900}
2901
fd4ef40d
EG
2902static inline void bnx2x_fan_failure(struct bnx2x *bp)
2903{
2904 int port = BP_PORT(bp);
2905
2906 /* mark the failure */
2907 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2908 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2909 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2910 bp->link_params.ext_phy_config);
2911
2912 /* log the failure */
7995c64e
JP
2913 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
2914 "Please contact Dell Support for assistance.\n");
fd4ef40d 2915}
ab6ad5a4 2916
877e9aa4 2917static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2918{
34f80b04 2919 int port = BP_PORT(bp);
877e9aa4 2920 int reg_offset;
4d295db0 2921 u32 val, swap_val, swap_override;
877e9aa4 2922
34f80b04
EG
2923 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2924 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2925
34f80b04 2926 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2927
2928 val = REG_RD(bp, reg_offset);
2929 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2930 REG_WR(bp, reg_offset, val);
2931
2932 BNX2X_ERR("SPIO5 hw attention\n");
2933
fd4ef40d 2934 /* Fan failure attention */
35b19ba5
EG
2935 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2936 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2937 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2938 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2939 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2940 /* The PHY reset is controlled by GPIO 1 */
2941 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2942 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2943 break;
2944
4d295db0
EG
2945 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2946 /* The PHY reset is controlled by GPIO 1 */
2947 /* fake the port number to cancel the swap done in
2948 set_gpio() */
2949 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2950 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2951 port = (swap_val && swap_override) ^ 1;
2952 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2953 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2954 break;
2955
877e9aa4
ET
2956 default:
2957 break;
2958 }
fd4ef40d 2959 bnx2x_fan_failure(bp);
877e9aa4 2960 }
34f80b04 2961
589abe3a
EG
2962 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2963 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2964 bnx2x_acquire_phy_lock(bp);
2965 bnx2x_handle_module_detect_int(&bp->link_params);
2966 bnx2x_release_phy_lock(bp);
2967 }
2968
34f80b04
EG
2969 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2970
2971 val = REG_RD(bp, reg_offset);
2972 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2973 REG_WR(bp, reg_offset, val);
2974
2975 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2976 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
2977 bnx2x_panic();
2978 }
877e9aa4
ET
2979}
2980
2981static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2982{
2983 u32 val;
2984
0626b899 2985 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2986
2987 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2988 BNX2X_ERR("DB hw attention 0x%x\n", val);
2989 /* DORQ discard attention */
2990 if (val & 0x2)
2991 BNX2X_ERR("FATAL error from DORQ\n");
2992 }
34f80b04
EG
2993
2994 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2995
2996 int port = BP_PORT(bp);
2997 int reg_offset;
2998
2999 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3000 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3001
3002 val = REG_RD(bp, reg_offset);
3003 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3004 REG_WR(bp, reg_offset, val);
3005
3006 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3007 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3008 bnx2x_panic();
3009 }
877e9aa4
ET
3010}
3011
3012static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3013{
3014 u32 val;
3015
3016 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3017
3018 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3019 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3020 /* CFC error attention */
3021 if (val & 0x2)
3022 BNX2X_ERR("FATAL error from CFC\n");
3023 }
3024
3025 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3026
3027 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3028 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3029 /* RQ_USDMDP_FIFO_OVERFLOW */
3030 if (val & 0x18000)
3031 BNX2X_ERR("FATAL error from PXP\n");
3032 }
34f80b04
EG
3033
3034 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3035
3036 int port = BP_PORT(bp);
3037 int reg_offset;
3038
3039 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3040 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3041
3042 val = REG_RD(bp, reg_offset);
3043 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3044 REG_WR(bp, reg_offset, val);
3045
3046 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3047 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3048 bnx2x_panic();
3049 }
877e9aa4
ET
3050}
3051
3052static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3053{
34f80b04
EG
3054 u32 val;
3055
877e9aa4
ET
3056 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3057
34f80b04
EG
3058 if (attn & BNX2X_PMF_LINK_ASSERT) {
3059 int func = BP_FUNC(bp);
3060
3061 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3062 bp->mf_config = SHMEM_RD(bp,
3063 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3064 val = SHMEM_RD(bp, func_mb[func].drv_status);
3065 if (val & DRV_STATUS_DCC_EVENT_MASK)
3066 bnx2x_dcc_event(bp,
3067 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3068 bnx2x__link_status_update(bp);
2691d51d 3069 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3070 bnx2x_pmf_update(bp);
3071
3072 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3073
3074 BNX2X_ERR("MC assert!\n");
3075 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3076 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3077 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3079 bnx2x_panic();
3080
3081 } else if (attn & BNX2X_MCP_ASSERT) {
3082
3083 BNX2X_ERR("MCP assert!\n");
3084 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3085 bnx2x_fw_dump(bp);
877e9aa4
ET
3086
3087 } else
3088 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3089 }
3090
3091 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3092 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3093 if (attn & BNX2X_GRC_TIMEOUT) {
3094 val = CHIP_IS_E1H(bp) ?
3095 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3096 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3097 }
3098 if (attn & BNX2X_GRC_RSV) {
3099 val = CHIP_IS_E1H(bp) ?
3100 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3101 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3102 }
877e9aa4 3103 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3104 }
3105}
3106
3107static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3108{
a2fbb9ea
ET
3109 struct attn_route attn;
3110 struct attn_route group_mask;
34f80b04 3111 int port = BP_PORT(bp);
877e9aa4 3112 int index;
a2fbb9ea
ET
3113 u32 reg_addr;
3114 u32 val;
3fcaf2e5 3115 u32 aeu_mask;
a2fbb9ea
ET
3116
3117 /* need to take HW lock because MCP or other port might also
3118 try to handle this event */
4a37fb66 3119 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3120
3121 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3122 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3123 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3124 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3125 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3126 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3127
3128 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3129 if (deasserted & (1 << index)) {
3130 group_mask = bp->attn_group[index];
3131
34f80b04
EG
3132 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3133 index, group_mask.sig[0], group_mask.sig[1],
3134 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3135
877e9aa4
ET
3136 bnx2x_attn_int_deasserted3(bp,
3137 attn.sig[3] & group_mask.sig[3]);
3138 bnx2x_attn_int_deasserted1(bp,
3139 attn.sig[1] & group_mask.sig[1]);
3140 bnx2x_attn_int_deasserted2(bp,
3141 attn.sig[2] & group_mask.sig[2]);
3142 bnx2x_attn_int_deasserted0(bp,
3143 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3144
a2fbb9ea
ET
3145 if ((attn.sig[0] & group_mask.sig[0] &
3146 HW_PRTY_ASSERT_SET_0) ||
3147 (attn.sig[1] & group_mask.sig[1] &
3148 HW_PRTY_ASSERT_SET_1) ||
3149 (attn.sig[2] & group_mask.sig[2] &
3150 HW_PRTY_ASSERT_SET_2))
6378c025 3151 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3152 }
3153 }
3154
4a37fb66 3155 bnx2x_release_alr(bp);
a2fbb9ea 3156
5c862848 3157 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3158
3159 val = ~deasserted;
3fcaf2e5
EG
3160 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3161 val, reg_addr);
5c862848 3162 REG_WR(bp, reg_addr, val);
a2fbb9ea 3163
a2fbb9ea 3164 if (~bp->attn_state & deasserted)
3fcaf2e5 3165 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3166
3167 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3168 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3169
3fcaf2e5
EG
3170 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3171 aeu_mask = REG_RD(bp, reg_addr);
3172
3173 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3174 aeu_mask, deasserted);
3175 aeu_mask |= (deasserted & 0xff);
3176 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3177
3fcaf2e5
EG
3178 REG_WR(bp, reg_addr, aeu_mask);
3179 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3180
3181 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3182 bp->attn_state &= ~deasserted;
3183 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3184}
3185
3186static void bnx2x_attn_int(struct bnx2x *bp)
3187{
3188 /* read local copy of bits */
68d59484
EG
3189 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3190 attn_bits);
3191 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3192 attn_bits_ack);
a2fbb9ea
ET
3193 u32 attn_state = bp->attn_state;
3194
3195 /* look for changed bits */
3196 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3197 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3198
3199 DP(NETIF_MSG_HW,
3200 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3201 attn_bits, attn_ack, asserted, deasserted);
3202
3203 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3204 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3205
3206 /* handle bits that were raised */
3207 if (asserted)
3208 bnx2x_attn_int_asserted(bp, asserted);
3209
3210 if (deasserted)
3211 bnx2x_attn_int_deasserted(bp, deasserted);
3212}
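/*
 * Worked example (illustrative): with attn_bits = 0x5 while attn_ack
 * and attn_state are both 0x1, asserted = 0x5 & ~0x1 & ~0x1 = 0x4,
 * so only attention line 2 is newly raised.  Conversely, attn_bits =
 * 0x1 with attn_ack = attn_state = 0x5 yields deasserted =
 * ~0x1 & 0x5 & 0x5 = 0x4, the same line going away.
 */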
3213
3214static void bnx2x_sp_task(struct work_struct *work)
3215{
1cf167f2 3216 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3217 u16 status;
3218
34f80b04 3219
a2fbb9ea
ET
3220 /* Return here if interrupt is disabled */
3221 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3222 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3223 return;
3224 }
3225
3226 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3227/* if (status == 0) */
3228/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3229
3196a88a 3230 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3231
877e9aa4
ET
3232 /* HW attentions */
3233 if (status & 0x1)
a2fbb9ea 3234 bnx2x_attn_int(bp);
a2fbb9ea 3235
68d59484 3236 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3237 IGU_INT_NOP, 1);
3238 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3239 IGU_INT_NOP, 1);
3240 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3241 IGU_INT_NOP, 1);
3242 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3243 IGU_INT_NOP, 1);
3244 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3245 IGU_INT_ENABLE, 1);
877e9aa4 3246
a2fbb9ea
ET
3247}
3248
3249static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3250{
3251 struct net_device *dev = dev_instance;
3252 struct bnx2x *bp = netdev_priv(dev);
3253
3254 /* Return here if interrupt is disabled */
3255 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3256 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3257 return IRQ_HANDLED;
3258 }
3259
8d9c5f34 3260 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3261
3262#ifdef BNX2X_STOP_ON_ERROR
3263 if (unlikely(bp->panic))
3264 return IRQ_HANDLED;
3265#endif
3266
993ac7b5
MC
3267#ifdef BCM_CNIC
3268 {
3269 struct cnic_ops *c_ops;
3270
3271 rcu_read_lock();
3272 c_ops = rcu_dereference(bp->cnic_ops);
3273 if (c_ops)
3274 c_ops->cnic_handler(bp->cnic_data, NULL);
3275 rcu_read_unlock();
3276 }
3277#endif
1cf167f2 3278 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3279
3280 return IRQ_HANDLED;
3281}
3282
3283/* end of slow path */
3284
3285/* Statistics */
3286
3287/****************************************************************************
3288* Macros
3289****************************************************************************/
3290
a2fbb9ea
ET
3291/* sum[hi:lo] += add[hi:lo] */
3292#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3293 do { \
3294 s_lo += a_lo; \
f5ba6772 3295 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3296 } while (0)
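/*
 * Carry example (illustrative): adding a_hi:a_lo = 0x0:0x2 to
 * s_hi:s_lo = 0x1:0xffffffff wraps s_lo to 0x1; the wrapped s_lo is
 * smaller than a_lo, so one carry is added and the result is
 * 0x2:0x00000001, matching 0x1ffffffff + 2 = 0x200000001.
 */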
3297
3298/* difference = minuend - subtrahend */
3299#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3300 do { \
bb2a0f7a
YG
3301 if (m_lo < s_lo) { \
3302 /* underflow */ \
a2fbb9ea 3303 d_hi = m_hi - s_hi; \
bb2a0f7a 3304 if (d_hi > 0) { \
6378c025 3305 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3306 d_hi--; \
3307 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3308 } else { \
6378c025 3309 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3310 d_hi = 0; \
3311 d_lo = 0; \
3312 } \
bb2a0f7a
YG
3313 } else { \
3314 /* m_lo >= s_lo */ \
a2fbb9ea 3315 if (m_hi < s_hi) { \
bb2a0f7a
YG
3316 d_hi = 0; \
3317 d_lo = 0; \
3318 } else { \
6378c025 3319 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3320 d_hi = m_hi - s_hi; \
3321 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3322 } \
3323 } \
3324 } while (0)
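/*
 * Borrow example (illustrative): 0x2:0x00000001 minus 0x1:0x00000002
 * takes the underflow branch (m_lo < s_lo): d_hi drops from 1 to 0
 * and d_lo = 0x1 + (UINT_MAX - 0x2) + 1 = 0xffffffff, matching
 * 0x200000001 - 0x100000002 = 0xffffffff.  When the subtrahend is the
 * larger value, the difference is clamped to 0:0 instead.
 */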
3325
bb2a0f7a 3326#define UPDATE_STAT64(s, t) \
a2fbb9ea 3327 do { \
bb2a0f7a
YG
3328 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3329 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3330 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3331 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3332 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3333 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3334 } while (0)
3335
bb2a0f7a 3336#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3337 do { \
bb2a0f7a
YG
3338 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3339 diff.lo, new->s##_lo, old->s##_lo); \
3340 ADD_64(estats->t##_hi, diff.hi, \
3341 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3342 } while (0)
3343
3344/* sum[hi:lo] += add */
3345#define ADD_EXTEND_64(s_hi, s_lo, a) \
3346 do { \
3347 s_lo += a; \
3348 s_hi += (s_lo < a) ? 1 : 0; \
3349 } while (0)
3350
bb2a0f7a 3351#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3352 do { \
bb2a0f7a
YG
3353 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3354 pstats->mac_stx[1].s##_lo, \
3355 new->s); \
a2fbb9ea
ET
3356 } while (0)
3357
bb2a0f7a 3358#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3359 do { \
4781bfad
EG
3360 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3361 old_tclient->s = tclient->s; \
de832a55
EG
3362 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3363 } while (0)
3364
3365#define UPDATE_EXTEND_USTAT(s, t) \
3366 do { \
3367 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3368 old_uclient->s = uclient->s; \
3369 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3370 } while (0)
3371
3372#define UPDATE_EXTEND_XSTAT(s, t) \
3373 do { \
4781bfad
EG
3374 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3375 old_xclient->s = xclient->s; \
de832a55
EG
3376 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3377 } while (0)
3378
3379/* minuend -= subtrahend */
3380#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3381 do { \
3382 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3383 } while (0)
3384
3385/* minuend[hi:lo] -= subtrahend */
3386#define SUB_EXTEND_64(m_hi, m_lo, s) \
3387 do { \
3388 SUB_64(m_hi, 0, m_lo, s); \
3389 } while (0)
3390
3391#define SUB_EXTEND_USTAT(s, t) \
3392 do { \
3393 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3394 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3395 } while (0)
3396
3397/*
3398 * General service functions
3399 */
3400
3401static inline long bnx2x_hilo(u32 *hiref)
3402{
3403 u32 lo = *(hiref + 1);
3404#if (BITS_PER_LONG == 64)
3405 u32 hi = *hiref;
3406
3407 return HILO_U64(hi, lo);
3408#else
3409 return lo;
3410#endif
3411}
3412
3413/*
3414 * Init service functions
3415 */
3416
bb2a0f7a
YG
3417static void bnx2x_storm_stats_post(struct bnx2x *bp)
3418{
3419 if (!bp->stats_pending) {
3420 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3421 int i, rc;
bb2a0f7a
YG
3422
3423 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3424 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3425 for_each_queue(bp, i)
3426 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3427
3428 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3429 ((u32 *)&ramrod_data)[1],
3430 ((u32 *)&ramrod_data)[0], 0);
3431 if (rc == 0) {
3432 /* the stats ramrod has its own slot on the spq */
3433 bp->spq_left++;
3434 bp->stats_pending = 1;
3435 }
3436 }
3437}
3438
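/* Kick off the DMAE commands prepared in the slowpath area: via the
 * loader when a chain of commands was built, or directly when only the
 * single function-statistics command is pending.
 */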
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

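/* Wait (sleeping 1 ms per try, at most 10 tries) for the DMAE engine
 * to write the completion value into the stats_comp word.
 */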
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

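/* On becoming PMF, read the current port statistics back from the
 * management firmware so accumulation continues from the values the
 * previous PMF left behind.
 */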
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

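/* Build the PMF's DMAE chain for one statistics cycle: copy the host
 * port/function statistics out to the MCP, then read the counters of
 * the active MAC (BMAC or EMAC) and of the NIG back into host memory.
 * Only the final command signals completion to the host.
 */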
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

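/* Prepare the single DMAE command that copies this function's host
 * statistics out to the management firmware.
 */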
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

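/* Fold the freshly DMAE'd BMAC counters into the cumulative mac_stx[1]
 * values; mac_stx[0] keeps the previous raw snapshot so UPDATE_STAT64
 * can compute per-cycle deltas.
 */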
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

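/* Consume the DMAE results of a statistics cycle: update the counters
 * of whichever MAC is active, then fold the NIG discard/truncate and
 * egress counters into the port statistics.
 */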
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

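/* Merge the per-client statistics written by the storm processors into
 * the per-queue, per-function and global driver counters.  Returns
 * non-zero if some storm has not yet answered the current query.
 */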
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
			le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
			le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

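/* Translate the accumulated driver statistics into the generic
 * net_device_stats counters reported to the stack.
 */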
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

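/* The UPDATE event handler: digest the completed DMAE/ramrod results,
 * refresh the netdev counters and immediately start the next cycle.
 */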
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

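/* Final flush on stop: push the last port/function statistics out to
 * the management firmware before statistics are disabled.
 */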
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

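/* Statistics state machine: rows are the current state (DISABLED or
 * ENABLED), columns the triggering event (PMF change, link up, timer
 * update, stop); each cell gives the action and the next state.
 */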
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart, STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update, STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop, STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

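/* Seed the MCP's copy of the port statistics with the driver's current
 * (just zeroed) values so both sides start from the same baseline.
 */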
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

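/* One-time statistics setup on load: fetch the port/function stats
 * mailbox addresses from shared memory, snapshot the NIG counters and
 * clear all host-side statistics before the state machine starts.
 */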
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

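/* Periodic driver timer: exchanges the keep-alive pulse with the
 * management CPU and triggers a statistics UPDATE event while the
 * device is open.
 */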
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

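/* Publish the DMA address of a fastpath host status block to the
 * CSTORM, initially mark all of its indices as disabled for host
 * coalescing, and enable its interrupts in the IGU.
 */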
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

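/* Set up the default (slowpath) status block: latch the attention
 * group signals from the MISC AEU registers and publish each storm's
 * section of the block, with all indices initially disabled.
 */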
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

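/* Program the RX-CQ and TX-CQ host-coalescing timeouts of every queue
 * from bp->rx_ticks/bp->tx_ticks, and mark an index as disabled when
 * its resulting timeout is zero.
 */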
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

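/* Build the RX rings of every queue: pre-allocate the TPA skb pool
 * (falling back to TPA-disabled on allocation failure), chain the
 * "next page" elements of the SGE, BD and CQE rings, then fill the
 * rings with SGEs and skbs.
 */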
4952static void bnx2x_init_rx_rings(struct bnx2x *bp)
4953{
7a9b2557 4954 int func = BP_FUNC(bp);
32626230
EG
4955 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4956 ETH_MAX_AGGREGATION_QUEUES_E1H;
4957 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4958 int i, j;
a2fbb9ea 4959
87942b46 4960 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4961 DP(NETIF_MSG_IFUP,
4962 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4963
7a9b2557 4964 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4965
54b9ddaa 4966 for_each_queue(bp, j) {
32626230 4967 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4968
32626230 4969 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4970 fp->tpa_pool[i].skb =
4971 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4972 if (!fp->tpa_pool[i].skb) {
4973 BNX2X_ERR("Failed to allocate TPA "
4974 "skb pool for queue[%d] - "
4975 "disabling TPA on this "
4976 "queue!\n", j);
4977 bnx2x_free_tpa_pool(bp, fp, i);
4978 fp->disable_tpa = 1;
4979 break;
4980 }
4981 pci_unmap_addr_set((struct sw_rx_bd *)
4982 &bp->fp->tpa_pool[i],
4983 mapping, 0);
4984 fp->tpa_state[i] = BNX2X_TPA_STOP;
4985 }
4986 }
4987 }
4988
54b9ddaa 4989 for_each_queue(bp, j) {
a2fbb9ea
ET
4990 struct bnx2x_fastpath *fp = &bp->fp[j];
4991
4992 fp->rx_bd_cons = 0;
4993 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4994 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4995
4996 /* "next page" elements initialization */
4997 /* SGE ring */
4998 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4999 struct eth_rx_sge *sge;
5000
5001 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5002 sge->addr_hi =
5003 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5004 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5005 sge->addr_lo =
5006 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5007 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5008 }
5009
5010 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5011
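		/*
		 * Like the SGE ring above, each BD page appears to reserve
		 * its last two entries as a link to the next page, which is
		 * why the chaining descriptor is written at
		 * RX_DESC_CNT * i - 2; the modulo makes the last page wrap
		 * back to the first.
		 */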
		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

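/*
 * The Tx BD pages are chained the same way: the last descriptor of each
 * page is a next_bd pointer.  The doorbell data header is primed with
 * DOORBELL_HDR_DB_TYPE here so later producer updates only need to write
 * the producer value.
 */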
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

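/*
 * The slow-path (SP) queue carries ramrod commands to the firmware; its
 * page base and initial producer are published to the XSTORM so the chip
 * knows where to fetch SPQ entries from.
 */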
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

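/*
 * Fill in the per-connection ETH context: the USTORM side describes the
 * Rx BD/SGE rings and the CSTORM/XSTORM side the Tx ring.  The
 * CDU_RSRVD_VALUE_TYPE_A() cookies appear to let the context
 * distribution unit validate each context against its CID and region.
 */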
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

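/*
 * Program the RSS indirection table: each of the
 * TSTORM_INDIRECTION_TABLE_SIZE slots maps a hash result to a client id,
 * spread round-robin over the active queues.
 */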
5227static void bnx2x_init_ind_table(struct bnx2x *bp)
5228{
26c8fa4d 5229 int func = BP_FUNC(bp);
a2fbb9ea
ET
5230 int i;
5231
555f6c78 5232 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5233 return;
5234
555f6c78
EG
5235 DP(NETIF_MSG_IFUP,
5236 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5237 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5238 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5239 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5240 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
5241}
5242
49d66772
ET
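/*
 * Push the per-client Tstorm configuration (MTU, E1HOV/VLAN stripping,
 * statistics counter id) for every queue; the structure is written into
 * internal memory as two 32-bit words.
 */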
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

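/*
 * Translate the driver rx_mode into per-client drop/accept masks for the
 * Tstorm MAC filter, and mirror the decision in the NIG LLH BRB mask so
 * the hardware path passes the same traffic classes to the host.
 */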
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

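/*
 * Per-function internal memory setup: RSS/TPA configuration, initial rx
 * mode, per-client statistics reset, statistics collection addresses, CQ
 * ring bases and aggregation limits, dropless flow control thresholds,
 * and the rate shaping/fairness (min-max) contexts.
 */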
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

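/*
 * The cases below intentionally fall through: a COMMON load also runs
 * the PORT and FUNCTION stages, and a PORT load also runs FUNCTION.
 */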
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
	   REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

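/*
 * The firmware image sections are stored gzip-compressed; gunzip_init
 * allocates a DMA-able output buffer (FW_BUF_SIZE) plus a zlib stream
 * and inflate workspace that are reused for every section.
 */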
static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

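/*
 * Inflate one gzip member into bp->gunzip_buf.  The 10-byte gzip header
 * (magic 0x1f 0x8b, method Z_DEFLATED) is validated and skipped by hand,
 * including an optional NUL-terminated file name when FNAME is set,
 * because zlib_inflate() is then run in raw mode (-MAX_WBITS).
 */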
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

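/*
 * Each REG_WR_DMAE below pushes a 3-word tuple (two data words plus a
 * control word) into the NIG debug packet register; the control word
 * appears to carry the framing bits, 0x20 marking SOP and 0x10 marking
 * EOP with eop_bvalid cleared, as the inline comments note.
 */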
/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

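/*
 * Writing 0 to a block's INT_MASK register unmasks every attention bit
 * for that block; the commented-out SEM/MISC lines are deliberately left
 * masked, and PBF keeps bits 3-4 masked as the trailing comment notes.
 */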
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

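/*
 * Derive the PXP arbiter read/write orders from the PCIe device control
 * register: PCI_EXP_DEVCTL_PAYLOAD (max payload size) sits at bits 5-7
 * and PCI_EXP_DEVCTL_READRQ (max read request size) at bits 12-14, hence
 * the shifts below.  A non-default bp->mrrs (set from a module
 * parameter, as far as the DP message suggests) forces the read order.
 */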
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

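/*
 * COMMON-stage hardware init, run once per chip by the first function to
 * load: global resets, PXP/DMAE bring-up, storm memories, CDU/CFC setup
 * and, on E1 first power-up, the internal memory self test.
 */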
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;
#ifdef BCM_CNIC
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_CNIC
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

#ifdef BCM_CNIC
	wb_write[0] = 0;
	wb_write[1] = 0;
	for (i = 0; i < 64; i++) {
		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);

		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
					  wb_write, 2);
		}
	}
#endif
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		pr_alert("please adjust the size of cdu_context(%ld)\n",
			 (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

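/*
 * PORT-stage init, run once per port: per-port block init, BRB pause
 * thresholds scaled to the MTU, PBF thresholds and credits, and the
 * per-port AEU attention masks.
 */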
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

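/*
 * ILT (internal lookup table) layout: the 768 lines are split evenly
 * between the two functions.  Besides the CDU context line, the CNIC
 * build maps additional lines for the timers, QM and searcher memories
 * in bnx2x_init_func() below.
 */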
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a valid bit (1) is
   added at bit 53; since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

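/*
 * FUNCTION-stage init: enable MSI reconfiguration in the HC, map this
 * function's ILT lines (context plus, with CNIC, timers/QM/searcher),
 * and program the per-function NIG and HC registers.
 */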
6530static int bnx2x_init_func(struct bnx2x *bp)
6531{
6532 int port = BP_PORT(bp);
6533 int func = BP_FUNC(bp);
8badd27a 6534 u32 addr, val;
34f80b04
EG
6535 int i;
6536
6537 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6538
8badd27a
EG
6539 /* set MSI reconfigure capability */
6540 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6541 val = REG_RD(bp, addr);
6542 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6543 REG_WR(bp, addr, val);
6544
34f80b04
EG
6545 i = FUNC_ILT_BASE(func);
6546
6547 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6548 if (CHIP_IS_E1H(bp)) {
6549 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6550 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6551 } else /* E1 */
6552 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6553 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6554
37b091ba
MC
6555#ifdef BCM_CNIC
6556 i += 1 + CNIC_ILT_LINES;
6557 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6558 if (CHIP_IS_E1(bp))
6559 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6560 else {
6561 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6562 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6563 }
6564
6565 i++;
6566 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6567 if (CHIP_IS_E1(bp))
6568 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6569 else {
6570 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6571 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6572 }
6573
6574 i++;
6575 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6576 if (CHIP_IS_E1(bp))
6577 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6578 else {
6579 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6580 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6581 }
6582
6583 /* tell the searcher where the T2 table is */
6584 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6585
6586 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6587 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6588
6589 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6590 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6591 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6592
6593 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6594#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIe errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

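	/* The load codes nest: a COMMON load also performs PORT and FUNCTION
	 * init, and a PORT load also performs FUNCTION init, hence the
	 * deliberate fall-through between the cases below.
	 */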
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
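	/* each 64-byte T2 entry thus carries, at offset 56, the DMA address
	 * of the next entry, chaining the table into a free list for the
	 * searcher */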

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

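	/* Resulting table layout: entry 0 is the slowpath vector, entry 1 is
	 * the CNIC vector when BCM_CNIC is built in, and one entry per
	 * fastpath queue follows.
	 */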
	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 * @param with_bcast	set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
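	/* e.g. for MAC 00:11:22:33:44:55 the three fields above become
	 * 0x0011, 0x2233 and 0x4455 on a little-endian host */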
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;
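	/* with the msleep(1) below this gives a timeout of roughly 5 s */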

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
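	/* (the last argument selects polling when clearing, since that path
	 * can run with interrupts already disabled) */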
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

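	/* The function that performed COMMON or PORT init becomes the PMF
	 * (port management function) and handles per-port duties such as
	 * link management.
	 */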
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
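		/* e.g. for MAC 00:11:22:33:44:55 the two writes above are
		 * 0x00000011 and 0x22334455 */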

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
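	/* e.g. chip num 0xABCD, rev 0x1, metal 0x23, bond 0x4 (hypothetical
	 * values) would pack into chip_id 0xABCD1234 */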
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8477 break;
8478
28577185
EG
8479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8480 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8481 ext_phy_type);
8482
8483 bp->port.supported |= (SUPPORTED_10baseT_Half |
8484 SUPPORTED_10baseT_Full |
8485 SUPPORTED_100baseT_Half |
8486 SUPPORTED_100baseT_Full |
8487 SUPPORTED_1000baseT_Full |
8488 SUPPORTED_10000baseT_Full |
8489 SUPPORTED_TP |
8490 SUPPORTED_Autoneg |
8491 SUPPORTED_Pause |
8492 SUPPORTED_Asym_Pause);
8493 break;
8494
c18487ee
YR
8495 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8496 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8497 bp->link_params.ext_phy_config);
8498 break;
8499
a2fbb9ea
ET
8500 default:
8501 BNX2X_ERR("NVRAM config error. "
8502 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8503 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8504 return;
8505 }
8506
34f80b04
EG
8507 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8508 port*0x18);
8509 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8510
a2fbb9ea
ET
8511 break;
8512
8513 default:
8514 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8515 bp->port.link_config);
a2fbb9ea
ET
8516 return;
8517 }
34f80b04 8518 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8519
8520 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8521 if (!(bp->link_params.speed_cap_mask &
8522 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8523 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8524
c18487ee
YR
8525 if (!(bp->link_params.speed_cap_mask &
8526 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8527 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8528
c18487ee
YR
8529 if (!(bp->link_params.speed_cap_mask &
8530 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8531 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8532
c18487ee
YR
8533 if (!(bp->link_params.speed_cap_mask &
8534 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8535 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8536
c18487ee
YR
8537 if (!(bp->link_params.speed_cap_mask &
8538 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8539 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8540 SUPPORTED_1000baseT_Full);
a2fbb9ea 8541
c18487ee
YR
8542 if (!(bp->link_params.speed_cap_mask &
8543 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8544 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8545
c18487ee
YR
8546 if (!(bp->link_params.speed_cap_mask &
8547 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8548 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8549
34f80b04 8550 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8551}
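/*
 * Editor's note -- a minimal, self-contained sketch (not driver code) of
 * the masking pattern above: any speed bit absent from the NVRAM
 * speed_cap_mask clears the matching SUPPORTED_* bit.  The bit values
 * below are hypothetical stand-ins, not the real PORT_HW_CFG_* or
 * ethtool constants.
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CAP_1G	0x01	/* hypothetical speed_cap_mask bits */
#define DEMO_CAP_10G	0x02
#define DEMO_SUP_1000	0x10	/* hypothetical SUPPORTED_* bits */
#define DEMO_SUP_10000	0x20

int main(void)
{
	uint32_t supported = DEMO_SUP_1000 | DEMO_SUP_10000;
	uint32_t speed_cap_mask = DEMO_CAP_1G;	/* NVRAM allows 1G only */

	if (!(speed_cap_mask & DEMO_CAP_1G))
		supported &= ~DEMO_SUP_1000;
	if (!(speed_cap_mask & DEMO_CAP_10G))
		supported &= ~DEMO_SUP_10000;

	printf("supported 0x%x\n", supported);	/* prints 0x10: 10G masked off */
	return 0;
}
#endif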
8552
34f80b04 8553static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8554{
c18487ee 8555 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8556
34f80b04 8557 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8558 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8559 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8560 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8561 bp->port.advertising = bp->port.supported;
a2fbb9ea 8562 } else {
c18487ee
YR
8563 u32 ext_phy_type =
8564 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8565
8566 if ((ext_phy_type ==
8567 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8568 (ext_phy_type ==
8569 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8570 /* force 10G, no AN */
c18487ee 8571 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8572 bp->port.advertising =
a2fbb9ea
ET
8573 (ADVERTISED_10000baseT_Full |
8574 ADVERTISED_FIBRE);
8575 break;
8576 }
8577 BNX2X_ERR("NVRAM config error. "
8578 "Invalid link_config 0x%x"
8579 " Autoneg not supported\n",
34f80b04 8580 bp->port.link_config);
a2fbb9ea
ET
8581 return;
8582 }
8583 break;
8584
8585 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8586 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8587 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8588 bp->port.advertising = (ADVERTISED_10baseT_Full |
8589 ADVERTISED_TP);
a2fbb9ea
ET
8590 } else {
8591 BNX2X_ERR("NVRAM config error. "
8592 "Invalid link_config 0x%x"
8593 " speed_cap_mask 0x%x\n",
34f80b04 8594 bp->port.link_config,
c18487ee 8595 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8596 return;
8597 }
8598 break;
8599
8600 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8601 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8602 bp->link_params.req_line_speed = SPEED_10;
8603 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8604 bp->port.advertising = (ADVERTISED_10baseT_Half |
8605 ADVERTISED_TP);
a2fbb9ea
ET
8606 } else {
8607 BNX2X_ERR("NVRAM config error. "
8608 "Invalid link_config 0x%x"
8609 " speed_cap_mask 0x%x\n",
34f80b04 8610 bp->port.link_config,
c18487ee 8611 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8612 return;
8613 }
8614 break;
8615
8616 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8617 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8618 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8619 bp->port.advertising = (ADVERTISED_100baseT_Full |
8620 ADVERTISED_TP);
a2fbb9ea
ET
8621 } else {
8622 BNX2X_ERR("NVRAM config error. "
8623 "Invalid link_config 0x%x"
8624 " speed_cap_mask 0x%x\n",
34f80b04 8625 bp->port.link_config,
c18487ee 8626 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8627 return;
8628 }
8629 break;
8630
8631 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8632 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8633 bp->link_params.req_line_speed = SPEED_100;
8634 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8635 bp->port.advertising = (ADVERTISED_100baseT_Half |
8636 ADVERTISED_TP);
a2fbb9ea
ET
8637 } else {
8638 BNX2X_ERR("NVRAM config error. "
8639 "Invalid link_config 0x%x"
8640 " speed_cap_mask 0x%x\n",
34f80b04 8641 bp->port.link_config,
c18487ee 8642 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8643 return;
8644 }
8645 break;
8646
8647 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8648 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8649 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8650 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8651 ADVERTISED_TP);
a2fbb9ea
ET
8652 } else {
8653 BNX2X_ERR("NVRAM config error. "
8654 "Invalid link_config 0x%x"
8655 " speed_cap_mask 0x%x\n",
34f80b04 8656 bp->port.link_config,
c18487ee 8657 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8658 return;
8659 }
8660 break;
8661
8662 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8663 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8664 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8665 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8666 ADVERTISED_TP);
a2fbb9ea
ET
8667 } else {
8668 BNX2X_ERR("NVRAM config error. "
8669 "Invalid link_config 0x%x"
8670 " speed_cap_mask 0x%x\n",
34f80b04 8671 bp->port.link_config,
c18487ee 8672 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8673 return;
8674 }
8675 break;
8676
8677 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8678 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8679 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8680 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8681 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8682 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8683 ADVERTISED_FIBRE);
a2fbb9ea
ET
8684 } else {
8685 BNX2X_ERR("NVRAM config error. "
8686 "Invalid link_config 0x%x"
8687 " speed_cap_mask 0x%x\n",
34f80b04 8688 bp->port.link_config,
c18487ee 8689 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8690 return;
8691 }
8692 break;
8693
8694 default:
8695 BNX2X_ERR("NVRAM config error. "
8696 "BAD link speed link_config 0x%x\n",
34f80b04 8697 bp->port.link_config);
c18487ee 8698 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8699 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8700 break;
8701 }
a2fbb9ea 8702
34f80b04
EG
8703 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8704 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8705 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8706 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8707 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8708
c18487ee 8709 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8710 " advertising 0x%x\n",
c18487ee
YR
8711 bp->link_params.req_line_speed,
8712 bp->link_params.req_duplex,
34f80b04 8713 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8714}
8715
e665bfda
MC
8716static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8717{
8718 mac_hi = cpu_to_be16(mac_hi);
8719 mac_lo = cpu_to_be32(mac_lo);
8720 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8721 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8722}
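/*
 * Editor's note -- the NVRAM stores a MAC address as a 16-bit "upper"
 * word and a 32-bit "lower" word; packing both big-endian yields the
 * wire-order 6-byte address.  A self-contained userspace sketch of the
 * same packing (not driver code; htons()/htonl() stand in for the
 * kernel's cpu_to_be16()/cpu_to_be32()):
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static void demo_set_mac_buf(uint8_t *mac_buf, uint32_t mac_lo, uint16_t mac_hi)
{
	uint16_t hi = htons(mac_hi);	/* bytes 0-1, most significant first */
	uint32_t lo = htonl(mac_lo);	/* bytes 2-5 */

	memcpy(mac_buf, &hi, sizeof(hi));
	memcpy(mac_buf + sizeof(hi), &lo, sizeof(lo));
}

int main(void)
{
	uint8_t mac[6];

	demo_set_mac_buf(mac, 0x33445566, 0x1122);
	/* prints 11:22:33:44:55:66 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
#endif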
8723
34f80b04 8724static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8725{
34f80b04
EG
8726 int port = BP_PORT(bp);
8727 u32 val, val2;
589abe3a 8728 u32 config;
c2c8b03e 8729 u16 i;
01cd4528 8730 u32 ext_phy_type;
a2fbb9ea 8731
c18487ee 8732 bp->link_params.bp = bp;
34f80b04 8733 bp->link_params.port = port;
c18487ee 8734
c18487ee 8735 bp->link_params.lane_config =
a2fbb9ea 8736 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8737 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8738 SHMEM_RD(bp,
8739 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8740 /* BCM8727_NOC => BCM8727, no over-current */
8741 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8742 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8743 bp->link_params.ext_phy_config &=
8744 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8745 bp->link_params.ext_phy_config |=
8746 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8747 bp->link_params.feature_config_flags |=
8748 FEATURE_CONFIG_BCM8727_NOC;
8749 }
8750
c18487ee 8751 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8752 SHMEM_RD(bp,
8753 dev_info.port_hw_config[port].speed_capability_mask);
8754
34f80b04 8755 bp->port.link_config =
a2fbb9ea
ET
8756 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8757
c2c8b03e
EG
8758 /* Get the 4 lanes xgxs config rx and tx */
8759 for (i = 0; i < 2; i++) {
8760 val = SHMEM_RD(bp,
8761 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8762 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8763 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8764
8765 val = SHMEM_RD(bp,
8766 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8767 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8768 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8769 }
8770
3ce2c3f9
EG
8771 /* If the device is capable of WoL, set the default state according
8772 * to the HW
8773 */
4d295db0 8774 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8775 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8776 (config & PORT_FEATURE_WOL_ENABLED));
8777
c2c8b03e
EG
8778 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8779 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8780 bp->link_params.lane_config,
8781 bp->link_params.ext_phy_config,
34f80b04 8782 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8783
4d295db0
EG
8784 bp->link_params.switch_cfg |= (bp->port.link_config &
8785 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8786 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8787
8788 bnx2x_link_settings_requested(bp);
8789
01cd4528
EG
8790 /*
8791 * If connected directly, work with the internal PHY; otherwise, work
8792 * with the external PHY
8793 */
8794 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8795 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8796 bp->mdio.prtad = bp->link_params.phy_addr;
8797
8798 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8799 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8800 bp->mdio.prtad =
659bc5c4 8801 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
01cd4528 8802
a2fbb9ea
ET
8803 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8804 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
e665bfda 8805 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
c18487ee
YR
8806 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8807 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
37b091ba
MC
8808
8809#ifdef BCM_CNIC
8810 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8811 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8812 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8813#endif
34f80b04
EG
8814}
8815
8816static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8817{
8818 int func = BP_FUNC(bp);
8819 u32 val, val2;
8820 int rc = 0;
a2fbb9ea 8821
34f80b04 8822 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8823
34f80b04
EG
8824 bp->e1hov = 0;
8825 bp->e1hmf = 0;
8826 if (CHIP_IS_E1H(bp)) {
8827 bp->mf_config =
8828 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8829
2691d51d 8830 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8831 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8832 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8833 bp->e1hmf = 1;
2691d51d
EG
8834 BNX2X_DEV_INFO("%s function mode\n",
8835 IS_E1HMF(bp) ? "multi" : "single");
8836
8837 if (IS_E1HMF(bp)) {
8838 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8839 e1hov_tag) &
8840 FUNC_MF_CFG_E1HOV_TAG_MASK);
8841 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8842 bp->e1hov = val;
8843 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8844 "(0x%04x)\n",
8845 func, bp->e1hov, bp->e1hov);
8846 } else {
34f80b04
EG
8847 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8848 " aborting\n", func);
8849 rc = -EPERM;
8850 }
2691d51d
EG
8851 } else {
8852 if (BP_E1HVN(bp)) {
8853 BNX2X_ERR("!!! VN %d in single function mode,"
8854 " aborting\n", BP_E1HVN(bp));
8855 rc = -EPERM;
8856 }
34f80b04
EG
8857 }
8858 }
a2fbb9ea 8859
34f80b04
EG
8860 if (!BP_NOMCP(bp)) {
8861 bnx2x_get_port_hwinfo(bp);
8862
8863 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8864 DRV_MSG_SEQ_NUMBER_MASK);
8865 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8866 }
8867
8868 if (IS_E1HMF(bp)) {
8869 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8870 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8871 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8872 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8873 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8874 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8875 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8876 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8877 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8878 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8879 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8880 ETH_ALEN);
8881 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8882 ETH_ALEN);
a2fbb9ea 8883 }
34f80b04
EG
8884
8885 return rc;
a2fbb9ea
ET
8886 }
8887
34f80b04
EG
8888 if (BP_NOMCP(bp)) {
8889 /* only supposed to happen on emulation/FPGA */
33471629 8890 BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
8891 random_ether_addr(bp->dev->dev_addr);
8892 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8893 }
a2fbb9ea 8894
34f80b04
EG
8895 return rc;
8896}
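/*
 * Editor's note -- illustration (not driver code) of the multi-function
 * MAC extraction above: the 16-bit "upper" shmem word supplies bytes 0-1
 * of the MAC and the 32-bit "lower" word bytes 2-5, most significant
 * byte first.
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val2 = 0x1122;		/* mac_upper */
	uint32_t val  = 0x33445566;	/* mac_lower */
	uint8_t mac[6];

	mac[0] = (uint8_t)(val2 >> 8 & 0xff);
	mac[1] = (uint8_t)(val2 & 0xff);
	mac[2] = (uint8_t)(val >> 24 & 0xff);
	mac[3] = (uint8_t)(val >> 16 & 0xff);
	mac[4] = (uint8_t)(val >> 8 & 0xff);
	mac[5] = (uint8_t)(val & 0xff);

	/* prints 11:22:33:44:55:66 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
#endif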
8897
8898static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8899{
8900 int func = BP_FUNC(bp);
87942b46 8901 int timer_interval;
34f80b04
EG
8902 int rc;
8903
da5a662a
VZ
8904 /* Disable interrupt handling until HW is initialized */
8905 atomic_set(&bp->intr_sem, 1);
e1510706 8906 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8907
34f80b04 8908 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8909 mutex_init(&bp->fw_mb_mutex);
993ac7b5
MC
8910#ifdef BCM_CNIC
8911 mutex_init(&bp->cnic_mutex);
8912#endif
a2fbb9ea 8913
1cf167f2 8914 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8915 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8916
8917 rc = bnx2x_get_hwinfo(bp);
8918
8919 /* need to reset the chip if UNDI was active */
8920 if (!BP_NOMCP(bp))
8921 bnx2x_undi_unload(bp);
8922
8923 if (CHIP_REV_IS_FPGA(bp))
7995c64e 8924 pr_err("FPGA detected\n");
34f80b04
EG
8925
8926 if (BP_NOMCP(bp) && (func == 0))
7995c64e 8927 pr_err("MCP disabled, must load devices in order!\n");
34f80b04 8928
555f6c78 8929 /* Set multi queue mode */
8badd27a
EG
8930 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8931 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
7995c64e 8932 pr_err("Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8933 multi_mode = ETH_RSS_MODE_DISABLED;
8934 }
8935 bp->multi_mode = multi_mode;
8936
8937
4fd89b7a
DK
8938 bp->dev->features |= NETIF_F_GRO;
8939
7a9b2557
VZ
8940 /* Set TPA flags */
8941 if (disable_tpa) {
8942 bp->flags &= ~TPA_ENABLE_FLAG;
8943 bp->dev->features &= ~NETIF_F_LRO;
8944 } else {
8945 bp->flags |= TPA_ENABLE_FLAG;
8946 bp->dev->features |= NETIF_F_LRO;
8947 }
8948
a18f5128
EG
8949 if (CHIP_IS_E1(bp))
8950 bp->dropless_fc = 0;
8951 else
8952 bp->dropless_fc = dropless_fc;
8953
8d5726c4 8954 bp->mrrs = mrrs;
7a9b2557 8955
34f80b04
EG
8956 bp->tx_ring_size = MAX_TX_AVAIL;
8957 bp->rx_ring_size = MAX_RX_AVAIL;
8958
8959 bp->rx_csum = 1;
34f80b04 8960
7d323bfd
EG
8961 /* round the default coalescing values to the hardware granularity */
8962 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8963 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
34f80b04 8964
87942b46
EG
8965 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8966 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8967
8968 init_timer(&bp->timer);
8969 bp->timer.expires = jiffies + bp->current_interval;
8970 bp->timer.data = (unsigned long) bp;
8971 bp->timer.function = bnx2x_timer;
8972
8973 return rc;
a2fbb9ea
ET
8974}
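/*
 * Editor's note -- the tx_ticks/rx_ticks expressions above round the
 * 50us and 25us defaults *down* to a multiple of (4 * BNX2X_BTR) via
 * integer division.  Standalone sketch (BNX2X_BTR's real value lives in
 * the driver headers; 4 here is an assumption for the demo):
 */
#if 0	/* illustration only */
#include <stdio.h>

#define DEMO_BTR 4	/* assumed stand-in for BNX2X_BTR */

int main(void)
{
	int gran = 4 * DEMO_BTR;		/* 16us granularity */
	int tx_ticks = (50 / gran) * gran;	/* 50 -> 48 */
	int rx_ticks = (25 / gran) * gran;	/* 25 -> 16 */

	printf("tx %dus rx %dus\n", tx_ticks, rx_ticks);
	return 0;
}
#endif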
8975
8976/*
8977 * ethtool service functions
8978 */
8979
8980/* All ethtool functions called with rtnl_lock */
8981
8982static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8983{
8984 struct bnx2x *bp = netdev_priv(dev);
8985
34f80b04
EG
8986 cmd->supported = bp->port.supported;
8987 cmd->advertising = bp->port.advertising;
a2fbb9ea 8988
f34d28ea
EG
8989 if ((bp->state == BNX2X_STATE_OPEN) &&
8990 !(bp->flags & MF_FUNC_DIS) &&
8991 (bp->link_vars.link_up)) {
c18487ee
YR
8992 cmd->speed = bp->link_vars.line_speed;
8993 cmd->duplex = bp->link_vars.duplex;
b015e3d1
EG
8994 if (IS_E1HMF(bp)) {
8995 u16 vn_max_rate;
34f80b04 8996
b015e3d1
EG
8997 vn_max_rate =
8998 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
34f80b04 8999 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
b015e3d1
EG
9000 if (vn_max_rate < cmd->speed)
9001 cmd->speed = vn_max_rate;
9002 }
9003 } else {
9004 cmd->speed = -1;
9005 cmd->duplex = -1;
34f80b04 9006 }
a2fbb9ea 9007
c18487ee
YR
9008 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9009 u32 ext_phy_type =
9010 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
9011
9012 switch (ext_phy_type) {
9013 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 9014 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 9015 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
9016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9017 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9018 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 9019 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
9020 cmd->port = PORT_FIBRE;
9021 break;
9022
9023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 9024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
9025 cmd->port = PORT_TP;
9026 break;
9027
c18487ee
YR
9028 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9029 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9030 bp->link_params.ext_phy_config);
9031 break;
9032
f1410647
ET
9033 default:
9034 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
9035 bp->link_params.ext_phy_config);
9036 break;
f1410647
ET
9037 }
9038 } else
a2fbb9ea 9039 cmd->port = PORT_TP;
a2fbb9ea 9040
01cd4528 9041 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
9042 cmd->transceiver = XCVR_INTERNAL;
9043
c18487ee 9044 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 9045 cmd->autoneg = AUTONEG_ENABLE;
f1410647 9046 else
a2fbb9ea 9047 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
9048
9049 cmd->maxtxpkt = 0;
9050 cmd->maxrxpkt = 0;
9051
9052 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9053 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9054 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9055 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9056 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9057 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9058 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9059
9060 return 0;
9061}
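/*
 * Editor's note -- in multi-function (E1HMF) mode the speed reported by
 * ethtool is clamped to the per-function bandwidth limit: the MAX_BW
 * field of mf_config is in units of 100 Mbps.  Sketch with assumed
 * mask/shift values (the real FUNC_MF_CFG_* constants come from the
 * firmware headers):
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_BW_MASK	0xff00	/* assumed stand-ins */
#define DEMO_MAX_BW_SHIFT	8

int main(void)
{
	uint32_t mf_config = 25 << DEMO_MAX_BW_SHIFT;	/* 25 * 100 = 2500 Mbps cap */
	uint16_t speed = 10000;				/* physical link speed */
	uint16_t vn_max_rate =
		((mf_config & DEMO_MAX_BW_MASK) >> DEMO_MAX_BW_SHIFT) * 100;

	if (vn_max_rate < speed)
		speed = vn_max_rate;

	printf("reported speed %u Mbps\n", speed);	/* 2500 */
	return 0;
}
#endif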
9062
9063static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9064{
9065 struct bnx2x *bp = netdev_priv(dev);
9066 u32 advertising;
9067
34f80b04
EG
9068 if (IS_E1HMF(bp))
9069 return 0;
9070
a2fbb9ea
ET
9071 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9072 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
9073 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
9074 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
9075 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9076 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9077 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9078
a2fbb9ea 9079 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
9080 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9081 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 9082 return -EINVAL;
f1410647 9083 }
a2fbb9ea
ET
9084
9085 /* advertise the requested speed and duplex if supported */
34f80b04 9086 cmd->advertising &= bp->port.supported;
a2fbb9ea 9087
c18487ee
YR
9088 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9089 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
9090 bp->port.advertising |= (ADVERTISED_Autoneg |
9091 cmd->advertising);
a2fbb9ea
ET
9092
9093 } else { /* forced speed */
9094 /* advertise the requested speed and duplex if supported */
9095 switch (cmd->speed) {
9096 case SPEED_10:
9097 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9098 if (!(bp->port.supported &
f1410647
ET
9099 SUPPORTED_10baseT_Full)) {
9100 DP(NETIF_MSG_LINK,
9101 "10M full not supported\n");
a2fbb9ea 9102 return -EINVAL;
f1410647 9103 }
a2fbb9ea
ET
9104
9105 advertising = (ADVERTISED_10baseT_Full |
9106 ADVERTISED_TP);
9107 } else {
34f80b04 9108 if (!(bp->port.supported &
f1410647
ET
9109 SUPPORTED_10baseT_Half)) {
9110 DP(NETIF_MSG_LINK,
9111 "10M half not supported\n");
a2fbb9ea 9112 return -EINVAL;
f1410647 9113 }
a2fbb9ea
ET
9114
9115 advertising = (ADVERTISED_10baseT_Half |
9116 ADVERTISED_TP);
9117 }
9118 break;
9119
9120 case SPEED_100:
9121 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 9122 if (!(bp->port.supported &
f1410647
ET
9123 SUPPORTED_100baseT_Full)) {
9124 DP(NETIF_MSG_LINK,
9125 "100M full not supported\n");
a2fbb9ea 9126 return -EINVAL;
f1410647 9127 }
a2fbb9ea
ET
9128
9129 advertising = (ADVERTISED_100baseT_Full |
9130 ADVERTISED_TP);
9131 } else {
34f80b04 9132 if (!(bp->port.supported &
f1410647
ET
9133 SUPPORTED_100baseT_Half)) {
9134 DP(NETIF_MSG_LINK,
9135 "100M half not supported\n");
a2fbb9ea 9136 return -EINVAL;
f1410647 9137 }
a2fbb9ea
ET
9138
9139 advertising = (ADVERTISED_100baseT_Half |
9140 ADVERTISED_TP);
9141 }
9142 break;
9143
9144 case SPEED_1000:
f1410647
ET
9145 if (cmd->duplex != DUPLEX_FULL) {
9146 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 9147 return -EINVAL;
f1410647 9148 }
a2fbb9ea 9149
34f80b04 9150 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 9151 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 9152 return -EINVAL;
f1410647 9153 }
a2fbb9ea
ET
9154
9155 advertising = (ADVERTISED_1000baseT_Full |
9156 ADVERTISED_TP);
9157 break;
9158
9159 case SPEED_2500:
f1410647
ET
9160 if (cmd->duplex != DUPLEX_FULL) {
9161 DP(NETIF_MSG_LINK,
9162 "2.5G half not supported\n");
a2fbb9ea 9163 return -EINVAL;
f1410647 9164 }
a2fbb9ea 9165
34f80b04 9166 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
9167 DP(NETIF_MSG_LINK,
9168 "2.5G full not supported\n");
a2fbb9ea 9169 return -EINVAL;
f1410647 9170 }
a2fbb9ea 9171
f1410647 9172 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
9173 ADVERTISED_TP);
9174 break;
9175
9176 case SPEED_10000:
f1410647
ET
9177 if (cmd->duplex != DUPLEX_FULL) {
9178 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 9179 return -EINVAL;
f1410647 9180 }
a2fbb9ea 9181
34f80b04 9182 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 9183 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 9184 return -EINVAL;
f1410647 9185 }
a2fbb9ea
ET
9186
9187 advertising = (ADVERTISED_10000baseT_Full |
9188 ADVERTISED_FIBRE);
9189 break;
9190
9191 default:
f1410647 9192 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
9193 return -EINVAL;
9194 }
9195
c18487ee
YR
9196 bp->link_params.req_line_speed = cmd->speed;
9197 bp->link_params.req_duplex = cmd->duplex;
34f80b04 9198 bp->port.advertising = advertising;
a2fbb9ea
ET
9199 }
9200
c18487ee 9201 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 9202 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 9203 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 9204 bp->port.advertising);
a2fbb9ea 9205
34f80b04 9206 if (netif_running(dev)) {
bb2a0f7a 9207 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9208 bnx2x_link_set(bp);
9209 }
a2fbb9ea
ET
9210
9211 return 0;
9212}
9213
0a64ea57
EG
9214#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9215#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9216
9217static int bnx2x_get_regs_len(struct net_device *dev)
9218{
0a64ea57 9219 struct bnx2x *bp = netdev_priv(dev);
0d28e49a 9220 int regdump_len = 0;
0a64ea57
EG
9221 int i;
9222
0a64ea57
EG
9223 if (CHIP_IS_E1(bp)) {
9224 for (i = 0; i < REGS_COUNT; i++)
9225 if (IS_E1_ONLINE(reg_addrs[i].info))
9226 regdump_len += reg_addrs[i].size;
9227
9228 for (i = 0; i < WREGS_COUNT_E1; i++)
9229 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9230 regdump_len += wreg_addrs_e1[i].size *
9231 (1 + wreg_addrs_e1[i].read_regs_count);
9232
9233 } else { /* E1H */
9234 for (i = 0; i < REGS_COUNT; i++)
9235 if (IS_E1H_ONLINE(reg_addrs[i].info))
9236 regdump_len += reg_addrs[i].size;
9237
9238 for (i = 0; i < WREGS_COUNT_E1H; i++)
9239 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9240 regdump_len += wreg_addrs_e1h[i].size *
9241 (1 + wreg_addrs_e1h[i].read_regs_count);
9242 }
9243 regdump_len *= 4;
9244 regdump_len += sizeof(struct dump_hdr);
9245
9246 return regdump_len;
9247}
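/*
 * Editor's note -- the length computed above is: (sum of all online
 * register-block sizes, counted in 32-bit words) * 4 bytes, plus the
 * dump header.  A toy recomputation (not driver code) with made-up
 * block sizes and a made-up header size:
 */
#if 0	/* illustration only */
#include <stdio.h>

struct demo_reg { int size; int online; };

int main(void)
{
	struct demo_reg regs[] = { { 16, 1 }, { 8, 0 }, { 4, 1 } };
	int words = 0, i;

	for (i = 0; i < 3; i++)
		if (regs[i].online)
			words += regs[i].size;	/* 16 + 4 = 20 words */

	/* 20 words * 4 bytes + an assumed 32-byte header = 112 bytes */
	printf("regdump_len %d\n", words * 4 + 32);
	return 0;
}
#endif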
9248
9249static void bnx2x_get_regs(struct net_device *dev,
9250 struct ethtool_regs *regs, void *_p)
9251{
9252 u32 *p = _p, i, j;
9253 struct bnx2x *bp = netdev_priv(dev);
9254 struct dump_hdr dump_hdr = {0};
9255
9256 regs->version = 0;
9257 memset(p, 0, regs->len);
9258
9259 if (!netif_running(bp->dev))
9260 return;
9261
9262 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9263 dump_hdr.dump_sign = dump_sign_all;
9264 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9265 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9266 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9267 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9268 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9269
9270 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9271 p += dump_hdr.hdr_size + 1;
9272
9273 if (CHIP_IS_E1(bp)) {
9274 for (i = 0; i < REGS_COUNT; i++)
9275 if (IS_E1_ONLINE(reg_addrs[i].info))
9276 for (j = 0; j < reg_addrs[i].size; j++)
9277 *p++ = REG_RD(bp,
9278 reg_addrs[i].addr + j*4);
9279
9280 } else { /* E1H */
9281 for (i = 0; i < REGS_COUNT; i++)
9282 if (IS_E1H_ONLINE(reg_addrs[i].info))
9283 for (j = 0; j < reg_addrs[i].size; j++)
9284 *p++ = REG_RD(bp,
9285 reg_addrs[i].addr + j*4);
9286 }
9287}
9288
0d28e49a
EG
9289#define PHY_FW_VER_LEN 10
9290
9291static void bnx2x_get_drvinfo(struct net_device *dev,
9292 struct ethtool_drvinfo *info)
9293{
9294 struct bnx2x *bp = netdev_priv(dev);
9295 u8 phy_fw_ver[PHY_FW_VER_LEN];
9296
9297 strcpy(info->driver, DRV_MODULE_NAME);
9298 strcpy(info->version, DRV_MODULE_VERSION);
9299
9300 phy_fw_ver[0] = '\0';
9301 if (bp->port.pmf) {
9302 bnx2x_acquire_phy_lock(bp);
9303 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9304 (bp->state != BNX2X_STATE_CLOSED),
9305 phy_fw_ver, PHY_FW_VER_LEN);
9306 bnx2x_release_phy_lock(bp);
9307 }
9308
9309 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9310 (bp->common.bc_ver & 0xff0000) >> 16,
9311 (bp->common.bc_ver & 0xff00) >> 8,
9312 (bp->common.bc_ver & 0xff),
9313 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9314 strcpy(info->bus_info, pci_name(bp->pdev));
9315 info->n_stats = BNX2X_NUM_STATS;
9316 info->testinfo_len = BNX2X_NUM_TESTS;
9317 info->eedump_len = bp->common.flash_size;
9318 info->regdump_len = bnx2x_get_regs_len(dev);
9319}
9320
a2fbb9ea
ET
9321static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9322{
9323 struct bnx2x *bp = netdev_priv(dev);
9324
9325 if (bp->flags & NO_WOL_FLAG) {
9326 wol->supported = 0;
9327 wol->wolopts = 0;
9328 } else {
9329 wol->supported = WAKE_MAGIC;
9330 if (bp->wol)
9331 wol->wolopts = WAKE_MAGIC;
9332 else
9333 wol->wolopts = 0;
9334 }
9335 memset(&wol->sopass, 0, sizeof(wol->sopass));
9336}
9337
9338static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9339{
9340 struct bnx2x *bp = netdev_priv(dev);
9341
9342 if (wol->wolopts & ~WAKE_MAGIC)
9343 return -EINVAL;
9344
9345 if (wol->wolopts & WAKE_MAGIC) {
9346 if (bp->flags & NO_WOL_FLAG)
9347 return -EINVAL;
9348
9349 bp->wol = 1;
34f80b04 9350 } else
a2fbb9ea 9351 bp->wol = 0;
34f80b04 9352
a2fbb9ea
ET
9353 return 0;
9354}
9355
9356static u32 bnx2x_get_msglevel(struct net_device *dev)
9357{
9358 struct bnx2x *bp = netdev_priv(dev);
9359
7995c64e 9360 return bp->msg_enable;
a2fbb9ea
ET
9361}
9362
9363static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9364{
9365 struct bnx2x *bp = netdev_priv(dev);
9366
9367 if (capable(CAP_NET_ADMIN))
7995c64e 9368 bp->msg_enable = level;
a2fbb9ea
ET
9369}
9370
9371static int bnx2x_nway_reset(struct net_device *dev)
9372{
9373 struct bnx2x *bp = netdev_priv(dev);
9374
34f80b04
EG
9375 if (!bp->port.pmf)
9376 return 0;
a2fbb9ea 9377
34f80b04 9378 if (netif_running(dev)) {
bb2a0f7a 9379 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9380 bnx2x_link_set(bp);
9381 }
a2fbb9ea
ET
9382
9383 return 0;
9384}
9385
ab6ad5a4 9386static u32 bnx2x_get_link(struct net_device *dev)
01e53298
NO
9387{
9388 struct bnx2x *bp = netdev_priv(dev);
9389
f34d28ea
EG
9390 if (bp->flags & MF_FUNC_DIS)
9391 return 0;
9392
01e53298
NO
9393 return bp->link_vars.link_up;
9394}
9395
a2fbb9ea
ET
9396static int bnx2x_get_eeprom_len(struct net_device *dev)
9397{
9398 struct bnx2x *bp = netdev_priv(dev);
9399
34f80b04 9400 return bp->common.flash_size;
a2fbb9ea
ET
9401}
9402
9403static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9404{
34f80b04 9405 int port = BP_PORT(bp);
a2fbb9ea
ET
9406 int count, i;
9407 u32 val = 0;
9408
9409 /* adjust timeout for emulation/FPGA */
9410 count = NVRAM_TIMEOUT_COUNT;
9411 if (CHIP_REV_IS_SLOW(bp))
9412 count *= 100;
9413
9414 /* request access to nvram interface */
9415 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9416 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9417
9418 for (i = 0; i < count*10; i++) {
9419 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9420 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9421 break;
9422
9423 udelay(5);
9424 }
9425
9426 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9427 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9428 return -EBUSY;
9429 }
9430
9431 return 0;
9432}
9433
9434static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9435{
34f80b04 9436 int port = BP_PORT(bp);
a2fbb9ea
ET
9437 int count, i;
9438 u32 val = 0;
9439
9440 /* adjust timeout for emulation/FPGA */
9441 count = NVRAM_TIMEOUT_COUNT;
9442 if (CHIP_REV_IS_SLOW(bp))
9443 count *= 100;
9444
9445 /* relinquish nvram interface */
9446 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9447 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9448
9449 for (i = 0; i < count*10; i++) {
9450 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9451 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9452 break;
9453
9454 udelay(5);
9455 }
9456
9457 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9458 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9459 return -EBUSY;
9460 }
9461
9462 return 0;
9463}
9464
9465static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9466{
9467 u32 val;
9468
9469 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9470
9471 /* enable both bits, even on read */
9472 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9474 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9475}
9476
9477static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9478{
9479 u32 val;
9480
9481 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9482
9483 /* disable both bits, even after read */
9484 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9485 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9486 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9487}
9488
4781bfad 9489static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9490 u32 cmd_flags)
9491{
f1410647 9492 int count, i, rc;
a2fbb9ea
ET
9493 u32 val;
9494
9495 /* build the command word */
9496 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9497
9498 /* need to clear DONE bit separately */
9499 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9500
9501 /* address of the NVRAM to read from */
9502 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9503 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9504
9505 /* issue a read command */
9506 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9507
9508 /* adjust timeout for emulation/FPGA */
9509 count = NVRAM_TIMEOUT_COUNT;
9510 if (CHIP_REV_IS_SLOW(bp))
9511 count *= 100;
9512
9513 /* wait for completion */
9514 *ret_val = 0;
9515 rc = -EBUSY;
9516 for (i = 0; i < count; i++) {
9517 udelay(5);
9518 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9519
9520 if (val & MCPR_NVM_COMMAND_DONE) {
9521 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9522 /* we read nvram data in cpu order,
9523 * but ethtool sees it as an array of bytes;
9524 * converting to big-endian does the work */
4781bfad 9525 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9526 rc = 0;
9527 break;
9528 }
9529 }
9530
9531 return rc;
9532}
9533
9534static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9535 int buf_size)
9536{
9537 int rc;
9538 u32 cmd_flags;
4781bfad 9539 __be32 val;
a2fbb9ea
ET
9540
9541 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9542 DP(BNX2X_MSG_NVM,
c14423fe 9543 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9544 offset, buf_size);
9545 return -EINVAL;
9546 }
9547
34f80b04
EG
9548 if (offset + buf_size > bp->common.flash_size) {
9549 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9550 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9551 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9552 return -EINVAL;
9553 }
9554
9555 /* request access to nvram interface */
9556 rc = bnx2x_acquire_nvram_lock(bp);
9557 if (rc)
9558 return rc;
9559
9560 /* enable access to nvram interface */
9561 bnx2x_enable_nvram_access(bp);
9562
9563 /* read the first word(s) */
9564 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9565 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9566 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9567 memcpy(ret_buf, &val, 4);
9568
9569 /* advance to the next dword */
9570 offset += sizeof(u32);
9571 ret_buf += sizeof(u32);
9572 buf_size -= sizeof(u32);
9573 cmd_flags = 0;
9574 }
9575
9576 if (rc == 0) {
9577 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9578 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9579 memcpy(ret_buf, &val, 4);
9580 }
9581
9582 /* disable access to nvram interface */
9583 bnx2x_disable_nvram_access(bp);
9584 bnx2x_release_nvram_lock(bp);
9585
9586 return rc;
9587}
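/*
 * Editor's note -- NVRAM reads are issued one dword at a time; only the
 * first dword carries the FIRST flag and only the final dword the LAST
 * flag, which is why the loop above stops one dword early and issues
 * the last read separately.  Flag-sequencing sketch (the flag values
 * are placeholders, not the real MCPR_NVM_COMMAND_* bits):
 */
#if 0	/* illustration only */
#include <stdio.h>

#define DEMO_FIRST 0x1	/* placeholder flags */
#define DEMO_LAST  0x2

int main(void)
{
	int buf_size = 12;		/* three dwords */
	unsigned cmd_flags = DEMO_FIRST;

	while (buf_size > 4) {
		printf("read dword, flags 0x%x\n", cmd_flags);
		buf_size -= 4;
		cmd_flags = 0;
	}
	cmd_flags |= DEMO_LAST;
	printf("read dword, flags 0x%x\n", cmd_flags);	/* final dword */
	return 0;
}
#endif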
9588
9589static int bnx2x_get_eeprom(struct net_device *dev,
9590 struct ethtool_eeprom *eeprom, u8 *eebuf)
9591{
9592 struct bnx2x *bp = netdev_priv(dev);
9593 int rc;
9594
2add3acb
EG
9595 if (!netif_running(dev))
9596 return -EAGAIN;
9597
34f80b04 9598 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9599 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9600 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9601 eeprom->len, eeprom->len);
9602
9603 /* parameters already validated in ethtool_get_eeprom */
9604
9605 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9606
9607 return rc;
9608}
9609
9610static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9611 u32 cmd_flags)
9612{
f1410647 9613 int count, i, rc;
a2fbb9ea
ET
9614
9615 /* build the command word */
9616 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9617
9618 /* need to clear DONE bit separately */
9619 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9620
9621 /* write the data */
9622 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9623
9624 /* address of the NVRAM to write to */
9625 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9626 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9627
9628 /* issue the write command */
9629 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9630
9631 /* adjust timeout for emulation/FPGA */
9632 count = NVRAM_TIMEOUT_COUNT;
9633 if (CHIP_REV_IS_SLOW(bp))
9634 count *= 100;
9635
9636 /* wait for completion */
9637 rc = -EBUSY;
9638 for (i = 0; i < count; i++) {
9639 udelay(5);
9640 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9641 if (val & MCPR_NVM_COMMAND_DONE) {
9642 rc = 0;
9643 break;
9644 }
9645 }
9646
9647 return rc;
9648}
9649
f1410647 9650#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9651
9652static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9653 int buf_size)
9654{
9655 int rc;
9656 u32 cmd_flags;
9657 u32 align_offset;
4781bfad 9658 __be32 val;
a2fbb9ea 9659
34f80b04
EG
9660 if (offset + buf_size > bp->common.flash_size) {
9661 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9662 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9663 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9664 return -EINVAL;
9665 }
9666
9667 /* request access to nvram interface */
9668 rc = bnx2x_acquire_nvram_lock(bp);
9669 if (rc)
9670 return rc;
9671
9672 /* enable access to nvram interface */
9673 bnx2x_enable_nvram_access(bp);
9674
9675 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9676 align_offset = (offset & ~0x03);
9677 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9678
9679 if (rc == 0) {
9680 val &= ~(0xff << BYTE_OFFSET(offset));
9681 val |= (*data_buf << BYTE_OFFSET(offset));
9682
9683 /* nvram data is returned as an array of bytes;
9684 * convert it back to cpu order */
9685 val = be32_to_cpu(val);
9686
a2fbb9ea
ET
9687 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9688 cmd_flags);
9689 }
9690
9691 /* disable access to nvram interface */
9692 bnx2x_disable_nvram_access(bp);
9693 bnx2x_release_nvram_lock(bp);
9694
9695 return rc;
9696}
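/*
 * Editor's note -- a single-byte NVRAM write is a read-modify-write of
 * the aligned dword: BYTE_OFFSET(offset) = 8 * (offset & 3) selects the
 * byte lane within the dword read back from flash.  Standalone sketch
 * (not driver code) of the lane arithmetic:
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <stdio.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

int main(void)
{
	uint32_t dword = 0xaabbccdd;	/* dword as read back */
	uint32_t offset = 6;		/* aligned dword at 4, byte lane 2 */
	uint8_t data = 0x11;

	dword &= ~(0xffu << BYTE_OFFSET(offset));	/* clear lane 2 */
	dword |= (uint32_t)data << BYTE_OFFSET(offset);	/* splice new byte */

	printf("0x%08x\n", dword);	/* prints 0xaa11ccdd */
	return 0;
}
#endif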
9697
9698static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9699 int buf_size)
9700{
9701 int rc;
9702 u32 cmd_flags;
9703 u32 val;
9704 u32 written_so_far;
9705
34f80b04 9706 if (buf_size == 1) /* ethtool */
a2fbb9ea 9707 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9708
9709 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9710 DP(BNX2X_MSG_NVM,
c14423fe 9711 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9712 offset, buf_size);
9713 return -EINVAL;
9714 }
9715
34f80b04
EG
9716 if (offset + buf_size > bp->common.flash_size) {
9717 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9718 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9719 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9720 return -EINVAL;
9721 }
9722
9723 /* request access to nvram interface */
9724 rc = bnx2x_acquire_nvram_lock(bp);
9725 if (rc)
9726 return rc;
9727
9728 /* enable access to nvram interface */
9729 bnx2x_enable_nvram_access(bp);
9730
9731 written_so_far = 0;
9732 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9733 while ((written_so_far < buf_size) && (rc == 0)) {
9734 if (written_so_far == (buf_size - sizeof(u32)))
9735 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9736 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9737 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9738 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9739 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9740
9741 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9742
9743 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9744
9745 /* advance to the next dword */
9746 offset += sizeof(u32);
9747 data_buf += sizeof(u32);
9748 written_so_far += sizeof(u32);
9749 cmd_flags = 0;
9750 }
9751
9752 /* disable access to nvram interface */
9753 bnx2x_disable_nvram_access(bp);
9754 bnx2x_release_nvram_lock(bp);
9755
9756 return rc;
9757}
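/*
 * Editor's note -- multi-dword NVRAM writes must be broken at
 * NVRAM_PAGE_SIZE boundaries: a dword that ends a page gets the LAST
 * flag and the dword starting the next page gets FIRST again, mirroring
 * the flag logic in the loop above.  Sketch with an assumed 16-byte
 * page and placeholder flag values:
 */
#if 0	/* illustration only */
#include <stdio.h>

#define DEMO_PAGE	16	/* assumed page size */
#define DEMO_FIRST	0x1	/* placeholder flags */
#define DEMO_LAST	0x2

int main(void)
{
	unsigned offset = 8, buf_size = 16, written = 0, cmd_flags = DEMO_FIRST;

	while (written < buf_size) {
		if (written == buf_size - 4)
			cmd_flags |= DEMO_LAST;		/* final dword */
		else if (((offset + 4) % DEMO_PAGE) == 0)
			cmd_flags |= DEMO_LAST;		/* dword ends a page */
		else if ((offset % DEMO_PAGE) == 0)
			cmd_flags |= DEMO_FIRST;	/* dword starts a page */

		/* prints flags 0x1, 0x2, 0x1, 0x2: the write splits per page */
		printf("write at 0x%x flags 0x%x\n", offset, cmd_flags);
		offset += 4;
		written += 4;
		cmd_flags = 0;
	}
	return 0;
}
#endif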
9758
9759static int bnx2x_set_eeprom(struct net_device *dev,
9760 struct ethtool_eeprom *eeprom, u8 *eebuf)
9761{
9762 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9763 int port = BP_PORT(bp);
9764 int rc = 0;
a2fbb9ea 9765
9f4c9583
EG
9766 if (!netif_running(dev))
9767 return -EAGAIN;
9768
34f80b04 9769 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9770 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9771 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9772 eeprom->len, eeprom->len);
9773
9774 /* parameters already validated in ethtool_set_eeprom */
9775
f57a6025
EG
9776 /* PHY eeprom can be accessed only by the PMF */
9777 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9778 !bp->port.pmf)
9779 return -EINVAL;
9780
9781 if (eeprom->magic == 0x50485950) {
9782 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9783 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9784
f57a6025
EG
9785 bnx2x_acquire_phy_lock(bp);
9786 rc |= bnx2x_link_reset(&bp->link_params,
9787 &bp->link_vars, 0);
9788 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9789 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9790 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9791 MISC_REGISTERS_GPIO_HIGH, port);
9792 bnx2x_release_phy_lock(bp);
9793 bnx2x_link_report(bp);
9794
9795 } else if (eeprom->magic == 0x50485952) {
9796 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
f34d28ea 9797 if (bp->state == BNX2X_STATE_OPEN) {
4a37fb66 9798 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9799 rc |= bnx2x_link_reset(&bp->link_params,
9800 &bp->link_vars, 1);
9801
9802 rc |= bnx2x_phy_init(&bp->link_params,
9803 &bp->link_vars);
4a37fb66 9804 bnx2x_release_phy_lock(bp);
f57a6025
EG
9805 bnx2x_calc_fc_adv(bp);
9806 }
9807 } else if (eeprom->magic == 0x53985943) {
9808 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9809 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9810 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9811 u8 ext_phy_addr =
659bc5c4 9812 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
f57a6025
EG
9813
9814 /* DSP Remove Download Mode */
9815 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9816 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9817
f57a6025
EG
9818 bnx2x_acquire_phy_lock(bp);
9819
9820 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9821
9822 /* wait 0.5 sec to allow it to run */
9823 msleep(500);
9824 bnx2x_ext_phy_hw_reset(bp, port);
9825 msleep(500);
9826 bnx2x_release_phy_lock(bp);
9827 }
9828 } else
c18487ee 9829 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9830
9831 return rc;
9832}
9833
9834static int bnx2x_get_coalesce(struct net_device *dev,
9835 struct ethtool_coalesce *coal)
9836{
9837 struct bnx2x *bp = netdev_priv(dev);
9838
9839 memset(coal, 0, sizeof(struct ethtool_coalesce));
9840
9841 coal->rx_coalesce_usecs = bp->rx_ticks;
9842 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9843
9844 return 0;
9845}
9846
ca00392c 9847#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9848static int bnx2x_set_coalesce(struct net_device *dev,
9849 struct ethtool_coalesce *coal)
9850{
9851 struct bnx2x *bp = netdev_priv(dev);
9852
9853 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9854 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9855 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9856
9857 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9858 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9859 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9860
34f80b04 9861 if (netif_running(dev))
a2fbb9ea
ET
9862 bnx2x_update_coalesce(bp);
9863
9864 return 0;
9865}
9866
9867static void bnx2x_get_ringparam(struct net_device *dev,
9868 struct ethtool_ringparam *ering)
9869{
9870 struct bnx2x *bp = netdev_priv(dev);
9871
9872 ering->rx_max_pending = MAX_RX_AVAIL;
9873 ering->rx_mini_max_pending = 0;
9874 ering->rx_jumbo_max_pending = 0;
9875
9876 ering->rx_pending = bp->rx_ring_size;
9877 ering->rx_mini_pending = 0;
9878 ering->rx_jumbo_pending = 0;
9879
9880 ering->tx_max_pending = MAX_TX_AVAIL;
9881 ering->tx_pending = bp->tx_ring_size;
9882}
9883
9884static int bnx2x_set_ringparam(struct net_device *dev,
9885 struct ethtool_ringparam *ering)
9886{
9887 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9888 int rc = 0;
a2fbb9ea
ET
9889
9890 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9891 (ering->tx_pending > MAX_TX_AVAIL) ||
9892 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9893 return -EINVAL;
9894
9895 bp->rx_ring_size = ering->rx_pending;
9896 bp->tx_ring_size = ering->tx_pending;
9897
34f80b04
EG
9898 if (netif_running(dev)) {
9899 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9900 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9901 }
9902
34f80b04 9903 return rc;
a2fbb9ea
ET
9904}
9905
9906static void bnx2x_get_pauseparam(struct net_device *dev,
9907 struct ethtool_pauseparam *epause)
9908{
9909 struct bnx2x *bp = netdev_priv(dev);
9910
356e2385
EG
9911 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9912 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9913 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9914
c0700f90
DM
9915 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9916 BNX2X_FLOW_CTRL_RX);
9917 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9918 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9919
9920 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9921 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9922 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9923}
9924
9925static int bnx2x_set_pauseparam(struct net_device *dev,
9926 struct ethtool_pauseparam *epause)
9927{
9928 struct bnx2x *bp = netdev_priv(dev);
9929
34f80b04
EG
9930 if (IS_E1HMF(bp))
9931 return 0;
9932
a2fbb9ea
ET
9933 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9934 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9935 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9936
c0700f90 9937 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9938
f1410647 9939 if (epause->rx_pause)
c0700f90 9940 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9941
f1410647 9942 if (epause->tx_pause)
c0700f90 9943 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9944
c0700f90
DM
9945 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9946 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9947
c18487ee 9948 if (epause->autoneg) {
34f80b04 9949 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9950 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9951 return -EINVAL;
9952 }
a2fbb9ea 9953
c18487ee 9954 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9955 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9956 }
a2fbb9ea 9957
c18487ee
YR
9958 DP(NETIF_MSG_LINK,
9959 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9960
9961 if (netif_running(dev)) {
bb2a0f7a 9962 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9963 bnx2x_link_set(bp);
9964 }
a2fbb9ea
ET
9965
9966 return 0;
9967}
9968
df0f2343
VZ
9969static int bnx2x_set_flags(struct net_device *dev, u32 data)
9970{
9971 struct bnx2x *bp = netdev_priv(dev);
9972 int changed = 0;
9973 int rc = 0;
9974
9975 /* TPA requires Rx CSUM offloading */
9976 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
d43a7e67
VZ
9977 if (!disable_tpa) {
9978 if (!(dev->features & NETIF_F_LRO)) {
9979 dev->features |= NETIF_F_LRO;
9980 bp->flags |= TPA_ENABLE_FLAG;
9981 changed = 1;
9982 }
9983 } else
9984 rc = -EINVAL;
df0f2343
VZ
9985 } else if (dev->features & NETIF_F_LRO) {
9986 dev->features &= ~NETIF_F_LRO;
9987 bp->flags &= ~TPA_ENABLE_FLAG;
9988 changed = 1;
9989 }
9990
9991 if (changed && netif_running(dev)) {
9992 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9993 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9994 }
9995
9996 return rc;
9997}
9998
a2fbb9ea
ET
9999static u32 bnx2x_get_rx_csum(struct net_device *dev)
10000{
10001 struct bnx2x *bp = netdev_priv(dev);
10002
10003 return bp->rx_csum;
10004}
10005
10006static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10007{
10008 struct bnx2x *bp = netdev_priv(dev);
df0f2343 10009 int rc = 0;
a2fbb9ea
ET
10010
10011 bp->rx_csum = data;
df0f2343
VZ
10012
10013 /* Disable TPA when Rx CSUM is disabled; otherwise all
10014 TPA'ed packets will be discarded due to a wrong TCP CSUM */
10015 if (!data) {
10016 u32 flags = ethtool_op_get_flags(dev);
10017
10018 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10019 }
10020
10021 return rc;
a2fbb9ea
ET
10022}
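/*
 * Editor's note -- the coupling above in one line: clearing Rx checksum
 * offload must also clear LRO, since TPA-aggregated packets would all
 * fail software checksum validation.  Minimal sketch of the rule:
 */
#if 0	/* illustration only */
#include <stdio.h>

int main(void)
{
	int rx_csum = 1, lro = 1;

	rx_csum = 0;		/* user turns Rx CSUM off ... */
	if (!rx_csum)
		lro = 0;	/* ... so LRO/TPA must go too */

	printf("rx_csum %d lro %d\n", rx_csum, lro);
	return 0;
}
#endif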
10023
10024static int bnx2x_set_tso(struct net_device *dev, u32 data)
10025{
755735eb 10026 if (data) {
a2fbb9ea 10027 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10028 dev->features |= NETIF_F_TSO6;
10029 } else {
a2fbb9ea 10030 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
10031 dev->features &= ~NETIF_F_TSO6;
10032 }
10033
a2fbb9ea
ET
10034 return 0;
10035}
10036
f3c87cdd 10037static const struct {
a2fbb9ea
ET
10038 char string[ETH_GSTRING_LEN];
10039} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
10040 { "register_test (offline)" },
10041 { "memory_test (offline)" },
10042 { "loopback_test (offline)" },
10043 { "nvram_test (online)" },
10044 { "interrupt_test (online)" },
10045 { "link_test (online)" },
d3d4f495 10046 { "idle check (online)" }
a2fbb9ea
ET
10047};
10048
f3c87cdd
YG
10049static int bnx2x_test_registers(struct bnx2x *bp)
10050{
10051 int idx, i, rc = -ENODEV;
10052 u32 wr_val = 0;
9dabc424 10053 int port = BP_PORT(bp);
f3c87cdd
YG
10054 static const struct {
10055 u32 offset0;
10056 u32 offset1;
10057 u32 mask;
10058 } reg_tbl[] = {
10059/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10060 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
10061 { HC_REG_AGG_INT_0, 4, 0x000003ff },
10062 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
10063 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
10064 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
10065 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
10066 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10067 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
10068 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
10069/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
10070 { QM_REG_CONNNUM_0, 4, 0x000fffff },
10071 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
10072 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
10073 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
10074 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10075 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
10076 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 10077 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
10078 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
10079/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
10080 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
10081 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
10082 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
10083 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
10084 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
10085 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
10086 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
10087 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
10088 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
10089/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
10090 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
10091 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
10092 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10093 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
10094 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10095 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
10096
10097 { 0xffffffff, 0, 0x00000000 }
10098 };
10099
10100 if (!netif_running(bp->dev))
10101 return rc;
10102
10103 /* Repeat the test twice:
10104 first by writing 0x00000000, then by writing 0xffffffff */
10105 for (idx = 0; idx < 2; idx++) {
10106
10107 switch (idx) {
10108 case 0:
10109 wr_val = 0;
10110 break;
10111 case 1:
10112 wr_val = 0xffffffff;
10113 break;
10114 }
10115
10116 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10117 u32 offset, mask, save_val, val;
f3c87cdd
YG
10118
10119 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10120 mask = reg_tbl[i].mask;
10121
10122 save_val = REG_RD(bp, offset);
10123
10124 REG_WR(bp, offset, wr_val);
10125 val = REG_RD(bp, offset);
10126
10127 /* Restore the original register's value */
10128 REG_WR(bp, offset, save_val);
10129
10130 /* verify the value is as expected */
10131 if ((val & mask) != (wr_val & mask))
10132 goto test_reg_exit;
10133 }
10134 }
10135
10136 rc = 0;
10137
10138test_reg_exit:
10139 return rc;
10140}
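/* Illustrative sketch (not part of the driver): the walk above reduced
 * to a single register. Only the bits covered by 'mask' are writable
 * from the host, and the original value is restored so the test stays
 * non-destructive even on a live port.
 */
static int __maybe_unused bnx2x_check_one_reg(struct bnx2x *bp, u32 offset,
					      u32 mask, u32 wr_val)
{
	u32 save_val = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, wr_val);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, save_val);	/* restore before comparing */

	return ((val & mask) == (wr_val & mask)) ? 0 : -ENODEV;
}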
10141
10142static int bnx2x_test_memory(struct bnx2x *bp)
10143{
10144 int i, j, rc = -ENODEV;
10145 u32 val;
10146 static const struct {
10147 u32 offset;
10148 int size;
10149 } mem_tbl[] = {
10150 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
10151 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10152 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
10153 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
10154 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
10155 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
10156 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
10157
10158 { 0xffffffff, 0 }
10159 };
10160 static const struct {
10161 char *name;
10162 u32 offset;
9dabc424
YG
10163 u32 e1_mask;
10164 u32 e1h_mask;
f3c87cdd 10165 } prty_tbl[] = {
9dabc424
YG
10166 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
10167 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
10168 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
10169 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
10170 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
10171 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
10172
10173 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
10174 };
10175
10176 if (!netif_running(bp->dev))
10177 return rc;
10178
10179 /* Go through all the memories */
10180 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10181 for (j = 0; j < mem_tbl[i].size; j++)
10182 REG_RD(bp, mem_tbl[i].offset + j*4);
10183
10184 /* Check the parity status */
10185 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10186 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
10187 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10188 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
10189 DP(NETIF_MSG_HW,
10190 "%s is 0x%x\n", prty_tbl[i].name, val);
10191 goto test_mem_exit;
10192 }
10193 }
10194
10195 rc = 0;
10196
10197test_mem_exit:
10198 return rc;
10199}
10200
f3c87cdd
YG
10201static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10202{
10203 int cnt = 1000;
10204
10205 if (link_up)
10206 while (bnx2x_link_test(bp) && cnt--)
10207 msleep(10);
10208}
10209
10210static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10211{
10212 unsigned int pkt_size, num_pkts, i;
10213 struct sk_buff *skb;
10214 unsigned char *packet;
ca00392c 10215 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
54b9ddaa 10216 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
f3c87cdd
YG
10217 u16 tx_start_idx, tx_idx;
10218 u16 rx_start_idx, rx_idx;
ca00392c 10219 u16 pkt_prod, bd_prod;
f3c87cdd 10220 struct sw_tx_bd *tx_buf;
ca00392c
EG
10221 struct eth_tx_start_bd *tx_start_bd;
10222 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
10223 dma_addr_t mapping;
10224 union eth_rx_cqe *cqe;
10225 u8 cqe_fp_flags;
10226 struct sw_rx_bd *rx_buf;
10227 u16 len;
10228 int rc = -ENODEV;
10229
b5bf9068
EG
10230 /* check the loopback mode */
10231 switch (loopback_mode) {
10232 case BNX2X_PHY_LOOPBACK:
10233 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10234 return -EINVAL;
10235 break;
10236 case BNX2X_MAC_LOOPBACK:
f3c87cdd 10237 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 10238 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
10239 break;
10240 default:
f3c87cdd 10241 return -EINVAL;
b5bf9068 10242 }
f3c87cdd 10243
b5bf9068
EG
10244 /* prepare the loopback packet */
10245 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10246 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
10247 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10248 if (!skb) {
10249 rc = -ENOMEM;
10250 goto test_loopback_exit;
10251 }
10252 packet = skb_put(skb, pkt_size);
10253 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
10254 memset(packet + ETH_ALEN, 0, ETH_ALEN);
10255 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
10256 for (i = ETH_HLEN; i < pkt_size; i++)
10257 packet[i] = (unsigned char) (i & 0xff);
10258
b5bf9068 10259 /* send the loopback packet */
f3c87cdd 10260 num_pkts = 0;
ca00392c
EG
10261 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10262 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 10263
ca00392c
EG
10264 pkt_prod = fp_tx->tx_pkt_prod++;
10265 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10266 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 10267 tx_buf->skb = skb;
ca00392c 10268 tx_buf->flags = 0;
f3c87cdd 10269
ca00392c
EG
10270 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10271 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10272 mapping = pci_map_single(bp->pdev, skb->data,
10273 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10274 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10275 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10276 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10277 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10278 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10279 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10280 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10281 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10282
10283 /* turn on parsing and get a BD */
10284 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10285 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10286
10287 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10288
58f4c4cf
EG
10289 wmb();
10290
ca00392c
EG
10291 fp_tx->tx_db.data.prod += 2;
10292 barrier();
54b9ddaa 10293 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
f3c87cdd
YG
10294
10295 mmiowb();
10296
10297 num_pkts++;
ca00392c 10298 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10299
10300 udelay(100);
10301
ca00392c 10302 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10303 if (tx_idx != tx_start_idx + num_pkts)
10304 goto test_loopback_exit;
10305
ca00392c 10306 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10307 if (rx_idx != rx_start_idx + num_pkts)
10308 goto test_loopback_exit;
10309
ca00392c 10310 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10311 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10312 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10313 goto test_loopback_rx_exit;
10314
10315 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10316 if (len != pkt_size)
10317 goto test_loopback_rx_exit;
10318
ca00392c 10319 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10320 skb = rx_buf->skb;
10321 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10322 for (i = ETH_HLEN; i < pkt_size; i++)
10323 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10324 goto test_loopback_rx_exit;
10325
10326 rc = 0;
10327
10328test_loopback_rx_exit:
f3c87cdd 10329
ca00392c
EG
10330 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10331 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10332 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10333 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10334
10335 /* Update producers */
ca00392c
EG
10336 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10337 fp_rx->rx_sge_prod);
f3c87cdd
YG
10338
10339test_loopback_exit:
10340 bp->link_params.loopback_mode = LOOPBACK_NONE;
10341
10342 return rc;
10343}
10344
10345static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10346{
b5bf9068 10347 int rc = 0, res;
f3c87cdd
YG
10348
10349 if (!netif_running(bp->dev))
10350 return BNX2X_LOOPBACK_FAILED;
10351
f8ef6e44 10352 bnx2x_netif_stop(bp, 1);
3910c8ae 10353 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10354
b5bf9068
EG
10355 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10356 if (res) {
10357 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10358 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10359 }
10360
b5bf9068
EG
10361 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10362 if (res) {
10363 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10364 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10365 }
10366
3910c8ae 10367 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10368 bnx2x_netif_start(bp);
10369
10370 return rc;
10371}
10372
10373#define CRC32_RESIDUAL 0xdebb20e3
10374
10375static int bnx2x_test_nvram(struct bnx2x *bp)
10376{
10377 static const struct {
10378 int offset;
10379 int size;
10380 } nvram_tbl[] = {
10381 { 0, 0x14 }, /* bootstrap */
10382 { 0x14, 0xec }, /* dir */
10383 { 0x100, 0x350 }, /* manuf_info */
10384 { 0x450, 0xf0 }, /* feature_info */
10385 { 0x640, 0x64 }, /* upgrade_key_info */
10386 { 0x6a4, 0x64 },
10387 { 0x708, 0x70 }, /* manuf_key_info */
10388 { 0x778, 0x70 },
10389 { 0, 0 }
10390 };
4781bfad 10391 __be32 buf[0x350 / 4];
f3c87cdd
YG
10392 u8 *data = (u8 *)buf;
10393 int i, rc;
ab6ad5a4 10394 u32 magic, crc;
f3c87cdd
YG
10395
10396 rc = bnx2x_nvram_read(bp, 0, data, 4);
10397 if (rc) {
f5372251 10398 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
f3c87cdd
YG
10399 goto test_nvram_exit;
10400 }
10401
10402 magic = be32_to_cpu(buf[0]);
10403 if (magic != 0x669955aa) {
10404 DP(NETIF_MSG_PROBE, "wrong magic value (0x%08x)\n", magic);
10405 rc = -ENODEV;
10406 goto test_nvram_exit;
10407 }
10408
10409 for (i = 0; nvram_tbl[i].size; i++) {
10410
10411 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10412 nvram_tbl[i].size);
10413 if (rc) {
10414 DP(NETIF_MSG_PROBE,
f5372251 10415 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10416 goto test_nvram_exit;
10417 }
10418
ab6ad5a4
EG
10419 crc = ether_crc_le(nvram_tbl[i].size, data);
10420 if (crc != CRC32_RESIDUAL) {
f3c87cdd 10421 DP(NETIF_MSG_PROBE,
ab6ad5a4 10422 "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
f3c87cdd
YG
10423 rc = -ENODEV;
10424 goto test_nvram_exit;
10425 }
10426 }
10427
10428test_nvram_exit:
10429 return rc;
10430}
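/* Illustrative sketch (not part of the driver): why the loop above
 * compares against CRC32_RESIDUAL rather than a stored checksum. Each
 * nvram_tbl[] region is assumed to end with its own little-endian
 * CRC32, and the CRC of data-plus-appended-CRC is the constant residual
 * 0xdebb20e3 for any intact region, so no per-region expected value is
 * needed.
 */
static bool __maybe_unused bnx2x_nvram_region_ok(u8 *data, int size)
{
	/* 'size' covers the region including its trailing CRC word */
	return ether_crc_le(size, data) == CRC32_RESIDUAL;
}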
10431
10432static int bnx2x_test_intr(struct bnx2x *bp)
10433{
10434 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10435 int i, rc;
10436
10437 if (!netif_running(bp->dev))
10438 return -ENODEV;
10439
8d9c5f34 10440 config->hdr.length = 0;
af246401 10441 if (CHIP_IS_E1(bp))
0c43f43f
VZ
10442 /* use last unicast entries */
10443 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
af246401
EG
10444 else
10445 config->hdr.offset = BP_FUNC(bp);
0626b899 10446 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10447 config->hdr.reserved1 = 0;
10448
e665bfda
MC
10449 bp->set_mac_pending++;
10450 smp_wmb();
f3c87cdd
YG
10451 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10452 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10453 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10454 if (rc == 0) {
f3c87cdd
YG
10455 for (i = 0; i < 10; i++) {
10456 if (!bp->set_mac_pending)
10457 break;
e665bfda 10458 smp_rmb();
f3c87cdd
YG
10459 msleep_interruptible(10);
10460 }
10461 if (i == 10)
10462 rc = -ENODEV;
10463 }
10464
10465 return rc;
10466}
10467
a2fbb9ea
ET
10468static void bnx2x_self_test(struct net_device *dev,
10469 struct ethtool_test *etest, u64 *buf)
10470{
10471 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10472
10473 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10474
f3c87cdd 10475 if (!netif_running(dev))
a2fbb9ea 10476 return;
a2fbb9ea 10477
33471629 10478 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10479 if (IS_E1HMF(bp))
10480 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10481
10482 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10483 int port = BP_PORT(bp);
10484 u32 val;
f3c87cdd
YG
10485 u8 link_up;
10486
279abdf5
EG
10487 /* save current value of input enable for TX port IF */
10488 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10489 /* disable input for TX port IF */
10490 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10491
061bc702 10492 link_up = (bnx2x_link_test(bp) == 0);
f3c87cdd
YG
10493 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10494 bnx2x_nic_load(bp, LOAD_DIAG);
10495 /* wait until link state is restored */
10496 bnx2x_wait_for_link(bp, link_up);
10497
10498 if (bnx2x_test_registers(bp) != 0) {
10499 buf[0] = 1;
10500 etest->flags |= ETH_TEST_FL_FAILED;
10501 }
10502 if (bnx2x_test_memory(bp) != 0) {
10503 buf[1] = 1;
10504 etest->flags |= ETH_TEST_FL_FAILED;
10505 }
10506 buf[2] = bnx2x_test_loopback(bp, link_up);
10507 if (buf[2] != 0)
10508 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10509
f3c87cdd 10510 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10511
10512 /* restore input for TX port IF */
10513 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10514
f3c87cdd
YG
10515 bnx2x_nic_load(bp, LOAD_NORMAL);
10516 /* wait until link state is restored */
10517 bnx2x_wait_for_link(bp, link_up);
10518 }
10519 if (bnx2x_test_nvram(bp) != 0) {
10520 buf[3] = 1;
a2fbb9ea
ET
10521 etest->flags |= ETH_TEST_FL_FAILED;
10522 }
f3c87cdd
YG
10523 if (bnx2x_test_intr(bp) != 0) {
10524 buf[4] = 1;
10525 etest->flags |= ETH_TEST_FL_FAILED;
10526 }
10527 if (bp->port.pmf)
10528 if (bnx2x_link_test(bp) != 0) {
10529 buf[5] = 1;
10530 etest->flags |= ETH_TEST_FL_FAILED;
10531 }
f3c87cdd
YG
10532
10533#ifdef BNX2X_EXTRA_DEBUG
10534 bnx2x_panic_dump(bp);
10535#endif
a2fbb9ea
ET
10536}
10537
de832a55
EG
10538static const struct {
10539 long offset;
10540 int size;
10541 u8 string[ETH_GSTRING_LEN];
10542} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10543/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10544 { Q_STATS_OFFSET32(error_bytes_received_hi),
10545 8, "[%d]: rx_error_bytes" },
10546 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10547 8, "[%d]: rx_ucast_packets" },
10548 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10549 8, "[%d]: rx_mcast_packets" },
10550 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10551 8, "[%d]: rx_bcast_packets" },
10552 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10553 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10554 4, "[%d]: rx_phy_ip_err_discards"},
10555 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10556 4, "[%d]: rx_skb_alloc_discard" },
10557 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10558
10559/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10560 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10561 8, "[%d]: tx_packets" }
10562};
10563
bb2a0f7a
YG
10564static const struct {
10565 long offset;
10566 int size;
10567 u32 flags;
66e855f3
YG
10568#define STATS_FLAGS_PORT 1
10569#define STATS_FLAGS_FUNC 2
de832a55 10570#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10571 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10572} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10573/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10574 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10575 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10576 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10577 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10578 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10579 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10580 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10581 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10582 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10583 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10584 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10585 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10586 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10587 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10588 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10589 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10590 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10591/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10592 8, STATS_FLAGS_PORT, "rx_fragments" },
10593 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10594 8, STATS_FLAGS_PORT, "rx_jabbers" },
10595 { STATS_OFFSET32(no_buff_discard_hi),
10596 8, STATS_FLAGS_BOTH, "rx_discards" },
10597 { STATS_OFFSET32(mac_filter_discard),
10598 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10599 { STATS_OFFSET32(xxoverflow_discard),
10600 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10601 { STATS_OFFSET32(brb_drop_hi),
10602 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10603 { STATS_OFFSET32(brb_truncate_hi),
10604 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10605 { STATS_OFFSET32(pause_frames_received_hi),
10606 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10607 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10608 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10609 { STATS_OFFSET32(nig_timer_max),
10610 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10611/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10612 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10613 { STATS_OFFSET32(rx_skb_alloc_failed),
10614 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10615 { STATS_OFFSET32(hw_csum_err),
10616 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10617
10618 { STATS_OFFSET32(total_bytes_transmitted_hi),
10619 8, STATS_FLAGS_BOTH, "tx_bytes" },
10620 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10621 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10622 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10623 8, STATS_FLAGS_BOTH, "tx_packets" },
10624 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10625 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10626 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10627 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10628 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10629 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10630 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10631 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10632/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10633 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10634 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10635 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10636 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10637 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10638 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10639 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10640 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10641 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10642 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10643 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10644 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10645 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10646 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10647 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10648 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10649 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10650 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10651 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10652/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10653 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10654 { STATS_OFFSET32(pause_frames_sent_hi),
10655 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10656};
10657
de832a55
EG
10658#define IS_PORT_STAT(i) \
10659 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10660#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10661#define IS_E1HMF_MODE_STAT(bp) \
7995c64e 10662 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
66e855f3 10663
15f0a394
BH
10664static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10665{
10666 struct bnx2x *bp = netdev_priv(dev);
10667 int i, num_stats;
10668
10669 switch (stringset) {
10670 case ETH_SS_STATS:
10671 if (is_multi(bp)) {
54b9ddaa 10672 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
15f0a394
BH
10673 if (!IS_E1HMF_MODE_STAT(bp))
10674 num_stats += BNX2X_NUM_STATS;
10675 } else {
10676 if (IS_E1HMF_MODE_STAT(bp)) {
10677 num_stats = 0;
10678 for (i = 0; i < BNX2X_NUM_STATS; i++)
10679 if (IS_FUNC_STAT(i))
10680 num_stats++;
10681 } else
10682 num_stats = BNX2X_NUM_STATS;
10683 }
10684 return num_stats;
10685
10686 case ETH_SS_TEST:
10687 return BNX2X_NUM_TESTS;
10688
10689 default:
10690 return -EINVAL;
10691 }
10692}
10693
a2fbb9ea
ET
10694static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10695{
bb2a0f7a 10696 struct bnx2x *bp = netdev_priv(dev);
de832a55 10697 int i, j, k;
bb2a0f7a 10698
a2fbb9ea
ET
10699 switch (stringset) {
10700 case ETH_SS_STATS:
de832a55
EG
10701 if (is_multi(bp)) {
10702 k = 0;
54b9ddaa 10703 for_each_queue(bp, i) {
de832a55
EG
10704 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10705 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10706 bnx2x_q_stats_arr[j].string, i);
10707 k += BNX2X_NUM_Q_STATS;
10708 }
10709 if (IS_E1HMF_MODE_STAT(bp))
10710 break;
10711 for (j = 0; j < BNX2X_NUM_STATS; j++)
10712 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10713 bnx2x_stats_arr[j].string);
10714 } else {
10715 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10716 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10717 continue;
10718 strcpy(buf + j*ETH_GSTRING_LEN,
10719 bnx2x_stats_arr[i].string);
10720 j++;
10721 }
bb2a0f7a 10722 }
a2fbb9ea
ET
10723 break;
10724
10725 case ETH_SS_TEST:
10726 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10727 break;
10728 }
10729}
10730
a2fbb9ea
ET
10731static void bnx2x_get_ethtool_stats(struct net_device *dev,
10732 struct ethtool_stats *stats, u64 *buf)
10733{
10734 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10735 u32 *hw_stats, *offset;
10736 int i, j, k;
bb2a0f7a 10737
de832a55
EG
10738 if (is_multi(bp)) {
10739 k = 0;
54b9ddaa 10740 for_each_queue(bp, i) {
de832a55
EG
10741 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10742 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10743 if (bnx2x_q_stats_arr[j].size == 0) {
10744 /* skip this counter */
10745 buf[k + j] = 0;
10746 continue;
10747 }
10748 offset = (hw_stats +
10749 bnx2x_q_stats_arr[j].offset);
10750 if (bnx2x_q_stats_arr[j].size == 4) {
10751 /* 4-byte counter */
10752 buf[k + j] = (u64) *offset;
10753 continue;
10754 }
10755 /* 8-byte counter */
10756 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10757 }
10758 k += BNX2X_NUM_Q_STATS;
10759 }
10760 if (IS_E1HMF_MODE_STAT(bp))
10761 return;
10762 hw_stats = (u32 *)&bp->eth_stats;
10763 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10764 if (bnx2x_stats_arr[j].size == 0) {
10765 /* skip this counter */
10766 buf[k + j] = 0;
10767 continue;
10768 }
10769 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10770 if (bnx2x_stats_arr[j].size == 4) {
10771 /* 4-byte counter */
10772 buf[k + j] = (u64) *offset;
10773 continue;
10774 }
10775 /* 8-byte counter */
10776 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10777 }
de832a55
EG
10778 } else {
10779 hw_stats = (u32 *)&bp->eth_stats;
10780 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10781 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10782 continue;
10783 if (bnx2x_stats_arr[i].size == 0) {
10784 /* skip this counter */
10785 buf[j] = 0;
10786 j++;
10787 continue;
10788 }
10789 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10790 if (bnx2x_stats_arr[i].size == 4) {
10791 /* 4-byte counter */
10792 buf[j] = (u64) *offset;
10793 j++;
10794 continue;
10795 }
10796 /* 8-byte counter */
10797 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10798 j++;
a2fbb9ea 10799 }
a2fbb9ea
ET
10800 }
10801}
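/* Illustrative sketch (not part of the driver): the hi/lo pairing used
 * by the copy loops above. An 8-byte statistic is stored as two
 * consecutive u32 words, most-significant word first, so it is widened
 * with HILO_U64, while a 4-byte one is simply cast to u64.
 */
static u64 __maybe_unused bnx2x_stat64(const u32 *offset)
{
	return HILO_U64(*offset, *(offset + 1));  /* ((u64)hi << 32) + lo */
}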
10802
10803static int bnx2x_phys_id(struct net_device *dev, u32 data)
10804{
10805 struct bnx2x *bp = netdev_priv(dev);
10806 int i;
10807
34f80b04
EG
10808 if (!netif_running(dev))
10809 return 0;
10810
10811 if (!bp->port.pmf)
10812 return 0;
10813
a2fbb9ea
ET
10814 if (data == 0)
10815 data = 2;
10816
10817 for (i = 0; i < (data * 2); i++) {
c18487ee 10818 if ((i % 2) == 0)
7846e471
YR
10819 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10820 SPEED_1000);
c18487ee 10821 else
7846e471 10822 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
c18487ee 10823
a2fbb9ea
ET
10824 msleep_interruptible(500);
10825 if (signal_pending(current))
10826 break;
10827 }
10828
c18487ee 10829 if (bp->link_vars.link_up)
7846e471
YR
10830 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10831 bp->link_vars.line_speed);
a2fbb9ea
ET
10832
10833 return 0;
10834}
10835
0fc0b732 10836static const struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10837 .get_settings = bnx2x_get_settings,
10838 .set_settings = bnx2x_set_settings,
10839 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10840 .get_regs_len = bnx2x_get_regs_len,
10841 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10842 .get_wol = bnx2x_get_wol,
10843 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10844 .get_msglevel = bnx2x_get_msglevel,
10845 .set_msglevel = bnx2x_set_msglevel,
10846 .nway_reset = bnx2x_nway_reset,
01e53298 10847 .get_link = bnx2x_get_link,
7a9b2557
VZ
10848 .get_eeprom_len = bnx2x_get_eeprom_len,
10849 .get_eeprom = bnx2x_get_eeprom,
10850 .set_eeprom = bnx2x_set_eeprom,
10851 .get_coalesce = bnx2x_get_coalesce,
10852 .set_coalesce = bnx2x_set_coalesce,
10853 .get_ringparam = bnx2x_get_ringparam,
10854 .set_ringparam = bnx2x_set_ringparam,
10855 .get_pauseparam = bnx2x_get_pauseparam,
10856 .set_pauseparam = bnx2x_set_pauseparam,
10857 .get_rx_csum = bnx2x_get_rx_csum,
10858 .set_rx_csum = bnx2x_set_rx_csum,
10859 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10860 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10861 .set_flags = bnx2x_set_flags,
10862 .get_flags = ethtool_op_get_flags,
10863 .get_sg = ethtool_op_get_sg,
10864 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10865 .get_tso = ethtool_op_get_tso,
10866 .set_tso = bnx2x_set_tso,
7a9b2557 10867 .self_test = bnx2x_self_test,
15f0a394 10868 .get_sset_count = bnx2x_get_sset_count,
7a9b2557 10869 .get_strings = bnx2x_get_strings,
a2fbb9ea 10870 .phys_id = bnx2x_phys_id,
bb2a0f7a 10871 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10872};
10873
10874/* end of ethtool_ops */
10875
10876/****************************************************************************
10877* General service functions
10878****************************************************************************/
10879
10880static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10881{
10882 u16 pmcsr;
10883
10884 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10885
10886 switch (state) {
10887 case PCI_D0:
34f80b04 10888 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10889 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10890 PCI_PM_CTRL_PME_STATUS));
10891
10892 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10893 /* delay required during transition out of D3hot */
a2fbb9ea 10894 msleep(20);
34f80b04 10895 break;
a2fbb9ea 10896
34f80b04
EG
10897 case PCI_D3hot:
10898 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10899 pmcsr |= 3;
a2fbb9ea 10900
34f80b04
EG
10901 if (bp->wol)
10902 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10903
34f80b04
EG
10904 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10905 pmcsr);
a2fbb9ea 10906
34f80b04
EG
10907 /* No more memory access after this point until
10908 * device is brought back to D0.
10909 */
10910 break;
10911
10912 default:
10913 return -EINVAL;
10914 }
10915 return 0;
a2fbb9ea
ET
10916}
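/* Note on the D3hot branch above: PCI_PM_CTRL_STATE_MASK is the low two
 * bits of PMCSR and the value 3 selects D3hot; PME stays enabled only
 * when WoL is configured, so the device can still wake the system from
 * that state.
 */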
10917
237907c1
EG
10918static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10919{
10920 u16 rx_cons_sb;
10921
10922 /* Tell compiler that status block fields can change */
10923 barrier();
10924 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10925 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10926 rx_cons_sb++;
10927 return (fp->rx_comp_cons != rx_cons_sb);
10928}
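/* Note on the check above: the last descriptor of every RCQ page is
 * used as a "next page" pointer rather than a completion, so a consumer
 * index from the status block that lands on MAX_RCQ_DESC_CNT is bumped
 * past it before being compared with the driver's rx_comp_cons.
 */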
10929
34f80b04
EG
10930/*
10931 * net_device service functions
10932 */
10933
a2fbb9ea
ET
10934static int bnx2x_poll(struct napi_struct *napi, int budget)
10935{
54b9ddaa 10936 int work_done = 0;
a2fbb9ea
ET
10937 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10938 napi);
10939 struct bnx2x *bp = fp->bp;
a2fbb9ea 10940
54b9ddaa 10941 while (1) {
a2fbb9ea 10942#ifdef BNX2X_STOP_ON_ERROR
54b9ddaa
VZ
10943 if (unlikely(bp->panic)) {
10944 napi_complete(napi);
10945 return 0;
10946 }
a2fbb9ea
ET
10947#endif
10948
54b9ddaa
VZ
10949 if (bnx2x_has_tx_work(fp))
10950 bnx2x_tx_int(fp);
356e2385 10951
54b9ddaa
VZ
10952 if (bnx2x_has_rx_work(fp)) {
10953 work_done += bnx2x_rx_int(fp, budget - work_done);
a2fbb9ea 10954
54b9ddaa
VZ
10955 /* must not complete if we consumed full budget */
10956 if (work_done >= budget)
10957 break;
10958 }
a2fbb9ea 10959
54b9ddaa
VZ
10960 /* Fall out from the NAPI loop if needed */
10961 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10962 bnx2x_update_fpsb_idx(fp);
10963 /* bnx2x_has_rx_work() reads the status block, thus we need
10964 * to ensure that status block indices have been actually read
10965 * (bnx2x_update_fpsb_idx) prior to this check
10966 * (bnx2x_has_rx_work) so that we won't write the "newer"
10967 * value of the status block to IGU (if there was a DMA right
10968 * after bnx2x_has_rx_work and if there is no rmb, the memory
10969 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10970 * before bnx2x_ack_sb). In this case there will never be
10971 * another interrupt until there is another update of the
10972 * status block, while there is still unhandled work.
10973 */
10974 rmb();
a2fbb9ea 10975
54b9ddaa
VZ
10976 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10977 napi_complete(napi);
10978 /* Re-enable interrupts */
10979 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10980 le16_to_cpu(fp->fp_c_idx),
10981 IGU_INT_NOP, 1);
10982 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10983 le16_to_cpu(fp->fp_u_idx),
10984 IGU_INT_ENABLE, 1);
10985 break;
10986 }
10987 }
a2fbb9ea 10988 }
356e2385 10989
a2fbb9ea
ET
10990 return work_done;
10991}
10992
755735eb
EG
10993
10994/* we split the first BD into headers and data BDs
33471629 10995 * to ease the pain of our fellow microcode engineers
755735eb
EG
10996 * we use one mapping for both BDs
10997 * So far this has only been observed to happen
10998 * in Other Operating Systems(TM)
10999 */
11000static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11001 struct bnx2x_fastpath *fp,
ca00392c
EG
11002 struct sw_tx_bd *tx_buf,
11003 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
11004 u16 bd_prod, int nbd)
11005{
ca00392c 11006 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
11007 struct eth_tx_bd *d_tx_bd;
11008 dma_addr_t mapping;
11009 int old_len = le16_to_cpu(h_tx_bd->nbytes);
11010
11011 /* first fix first BD */
11012 h_tx_bd->nbd = cpu_to_le16(nbd);
11013 h_tx_bd->nbytes = cpu_to_le16(hlen);
11014
11015 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11016 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11017 h_tx_bd->addr_lo, h_tx_bd->nbd);
11018
11019 /* now get a new data BD
11020 * (after the pbd) and fill it */
11021 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11022 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
11023
11024 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11025 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11026
11027 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11028 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11029 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
11030
11031 /* this marks the BD as one that has no individual mapping */
11032 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11033
755735eb
EG
11034 DP(NETIF_MSG_TX_QUEUED,
11035 "TSO split data size is %d (%x:%x)\n",
11036 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11037
ca00392c
EG
11038 /* update tx_bd */
11039 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
11040
11041 return bd_prod;
11042}
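/* Note on the split above: both BDs reference the same DMA mapping (the
 * data BD simply points hlen bytes into it), so tx_buf is flagged
 * BNX2X_TSO_SPLIT_BD and the completion path must unmap the region only
 * once, for the header BD.
 */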
11043
11044static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11045{
11046 if (fix > 0)
11047 csum = (u16) ~csum_fold(csum_sub(csum,
11048 csum_partial(t_header - fix, fix, 0)));
11049
11050 else if (fix < 0)
11051 csum = (u16) ~csum_fold(csum_add(csum,
11052 csum_partial(t_header, -fix, 0)));
11053
11054 return swab16(csum);
11055}
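/* Illustrative sketch (not part of the driver): how the fixup above is
 * used. 'fix' is the signed distance between where the stack computed
 * the partial checksum and where the HW expects it to start; the sum of
 * the skipped (or extra) span is subtracted or added before folding.
 * The UDP path in bnx2x_start_xmit() calls it as:
 *
 *	s8 fix = SKB_CS_OFF(skb);	// signed!
 *	pbd->tcp_pseudo_csum =
 *		bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb), fix);
 */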
11056
11057static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11058{
11059 u32 rc;
11060
11061 if (skb->ip_summed != CHECKSUM_PARTIAL)
11062 rc = XMIT_PLAIN;
11063
11064 else {
4781bfad 11065 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
11066 rc = XMIT_CSUM_V6;
11067 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11068 rc |= XMIT_CSUM_TCP;
11069
11070 } else {
11071 rc = XMIT_CSUM_V4;
11072 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11073 rc |= XMIT_CSUM_TCP;
11074 }
11075 }
11076
11077 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
d6a2f98b 11078 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
755735eb
EG
11079
11080 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
d6a2f98b 11081 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
755735eb
EG
11082
11083 return rc;
11084}
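/* Note: the return value is a bit mask, not an enum; an IPv4 TSO skb,
 * for instance, carries XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP, and
 * the transmit path below tests it with (xmit_type & XMIT_GSO) and
 * (xmit_type & XMIT_CSUM) rather than by equality.
 */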
11085
632da4d6 11086#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11087/* check if packet requires linearization (packet is too fragmented)
11088 no need to check fragmentation if page size > 8K (there will be no
11089 violation to FW restrictions) */
755735eb
EG
11090static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11091 u32 xmit_type)
11092{
11093 int to_copy = 0;
11094 int hlen = 0;
11095 int first_bd_sz = 0;
11096
11097 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11098 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11099
11100 if (xmit_type & XMIT_GSO) {
11101 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11102 /* Check if LSO packet needs to be copied:
11103 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11104 int wnd_size = MAX_FETCH_BD - 3;
33471629 11105 /* Number of windows to check */
755735eb
EG
11106 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11107 int wnd_idx = 0;
11108 int frag_idx = 0;
11109 u32 wnd_sum = 0;
11110
11111 /* Headers length */
11112 hlen = (int)(skb_transport_header(skb) - skb->data) +
11113 tcp_hdrlen(skb);
11114
11115 /* Amount of data (w/o headers) on linear part of SKB*/
11116 first_bd_sz = skb_headlen(skb) - hlen;
11117
11118 wnd_sum = first_bd_sz;
11119
11120 /* Calculate the first sum - it's special */
11121 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11122 wnd_sum +=
11123 skb_shinfo(skb)->frags[frag_idx].size;
11124
11125 /* If there was data on linear skb data - check it */
11126 if (first_bd_sz > 0) {
11127 if (unlikely(wnd_sum < lso_mss)) {
11128 to_copy = 1;
11129 goto exit_lbl;
11130 }
11131
11132 wnd_sum -= first_bd_sz;
11133 }
11134
11135 /* Others are easier: run through the frag list and
11136 check all windows */
11137 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11138 wnd_sum +=
11139 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11140
11141 if (unlikely(wnd_sum < lso_mss)) {
11142 to_copy = 1;
11143 break;
11144 }
11145 wnd_sum -=
11146 skb_shinfo(skb)->frags[wnd_idx].size;
11147 }
755735eb
EG
11148 } else {
11149 /* a non-LSO packet this fragmented must always
11150 be linearized */
11151 to_copy = 1;
11152 }
11153 }
11154
11155exit_lbl:
11156 if (unlikely(to_copy))
11157 DP(NETIF_MSG_TX_QUEUED,
11158 "Linearization IS REQUIRED for %s packet. "
11159 "num_frags %d hlen %d first_bd_sz %d\n",
11160 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11161 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11162
11163 return to_copy;
11164}
632da4d6 11165#endif
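/* Illustrative sketch (not part of the driver): the FW restriction the
 * check above enforces, reduced to a plain sliding window. For an LSO
 * packet, every window of 'wnd_size' consecutive descriptors must carry
 * at least one full MSS of payload; if any window falls short, the skb
 * has to be linearized first. A hypothetical standalone checker over an
 * array of fragment sizes:
 */
static bool __maybe_unused bnx2x_windows_ok(const u32 *frag_sz, int nfrags,
					    int wnd_size, u32 lso_mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];	/* slide */
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return false;	/* would violate the FW rule */
	}
	return true;
}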
755735eb
EG
11166
11167/* called with netif_tx_lock
a2fbb9ea 11168 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 11169 * netif_wake_queue()
a2fbb9ea 11170 */
61357325 11171static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
a2fbb9ea
ET
11172{
11173 struct bnx2x *bp = netdev_priv(dev);
54b9ddaa 11174 struct bnx2x_fastpath *fp;
555f6c78 11175 struct netdev_queue *txq;
a2fbb9ea 11176 struct sw_tx_bd *tx_buf;
ca00392c
EG
11177 struct eth_tx_start_bd *tx_start_bd;
11178 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
11179 struct eth_tx_parse_bd *pbd = NULL;
11180 u16 pkt_prod, bd_prod;
755735eb 11181 int nbd, fp_index;
a2fbb9ea 11182 dma_addr_t mapping;
755735eb 11183 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
11184 int i;
11185 u8 hlen = 0;
ca00392c 11186 __le16 pkt_size = 0;
a2fbb9ea
ET
11187
11188#ifdef BNX2X_STOP_ON_ERROR
11189 if (unlikely(bp->panic))
11190 return NETDEV_TX_BUSY;
11191#endif
11192
555f6c78
EG
11193 fp_index = skb_get_queue_mapping(skb);
11194 txq = netdev_get_tx_queue(dev, fp_index);
11195
54b9ddaa 11196 fp = &bp->fp[fp_index];
755735eb 11197
231fd58a 11198 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
54b9ddaa 11199 fp->eth_q_stats.driver_xoff++;
555f6c78 11200 netif_tx_stop_queue(txq);
a2fbb9ea
ET
11201 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11202 return NETDEV_TX_BUSY;
11203 }
11204
755735eb
EG
11205 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
11206 " gso type %x xmit_type %x\n",
11207 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11208 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11209
632da4d6 11210#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
11211 /* First, check if we need to linearize the skb (due to FW
11212 restrictions). No need to check fragmentation if page size > 8K
11213 (there will be no violation to FW restrictions) */
755735eb
EG
11214 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11215 /* Statistics of linearization */
11216 bp->lin_cnt++;
11217 if (skb_linearize(skb) != 0) {
11218 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11219 "silently dropping this SKB\n");
11220 dev_kfree_skb_any(skb);
da5a662a 11221 return NETDEV_TX_OK;
755735eb
EG
11222 }
11223 }
632da4d6 11224#endif
755735eb 11225
a2fbb9ea 11226 /*
755735eb 11227 Please read carefully. First we use one BD which we mark as start,
ca00392c 11228 then we have a parsing info BD (used for TSO or xsum),
755735eb 11229 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
11230 (don't forget to mark the last one as last,
11231 and to unmap only AFTER you write to the BD ...)
755735eb 11232 And above all, all PBD sizes are in words - NOT DWORDS!
a2fbb9ea
ET
11233 */
11234
11235 pkt_prod = fp->tx_pkt_prod++;
755735eb 11236 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11237
755735eb 11238 /* get a tx_buf and first BD */
a2fbb9ea 11239 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11240 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11241
ca00392c
EG
11242 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11243 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11244 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11245 /* header nbd */
ca00392c 11246 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11247
755735eb
EG
11248 /* remember the first BD of the packet */
11249 tx_buf->first_bd = fp->tx_bd_prod;
11250 tx_buf->skb = skb;
ca00392c 11251 tx_buf->flags = 0;
a2fbb9ea
ET
11252
11253 DP(NETIF_MSG_TX_QUEUED,
11254 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11255 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11256
0c6671b0
EG
11257#ifdef BCM_VLAN
11258 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11259 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
11260 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11261 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11262 } else
0c6671b0 11263#endif
ca00392c 11264 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11265
ca00392c
EG
11266 /* turn on parsing and get a BD */
11267 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11268 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11269
ca00392c 11270 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
11271
11272 if (xmit_type & XMIT_CSUM) {
ca00392c 11273 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11274
11275 /* for now NS flag is not used in Linux */
4781bfad
EG
11276 pbd->global_data =
11277 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11278 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11279
755735eb
EG
11280 pbd->ip_hlen = (skb_transport_header(skb) -
11281 skb_network_header(skb)) / 2;
11282
11283 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11284
755735eb 11285 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11286 hlen = hlen*2;
a2fbb9ea 11287
ca00392c 11288 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11289
11290 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11291 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11292 ETH_TX_BD_FLAGS_IP_CSUM;
11293 else
ca00392c
EG
11294 tx_start_bd->bd_flags.as_bitfield |=
11295 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11296
11297 if (xmit_type & XMIT_CSUM_TCP) {
11298 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11299
11300 } else {
11301 s8 fix = SKB_CS_OFF(skb); /* signed! */
11302
ca00392c 11303 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11304
755735eb 11305 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11306 "hlen %d fix %d csum before fix %x\n",
11307 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11308
11309 /* HW bug: fixup the CSUM */
11310 pbd->tcp_pseudo_csum =
11311 bnx2x_csum_fix(skb_transport_header(skb),
11312 SKB_CS(skb), fix);
11313
11314 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11315 pbd->tcp_pseudo_csum);
11316 }
a2fbb9ea
ET
11317 }
11318
11319 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11320 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11321
ca00392c
EG
11322 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11323 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11324 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11325 tx_start_bd->nbd = cpu_to_le16(nbd);
11326 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11327 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11328
11329 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11330 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11331 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11332 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11333 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11334
755735eb 11335 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11336
11337 DP(NETIF_MSG_TX_QUEUED,
11338 "TSO packet len %d hlen %d total len %d tso size %d\n",
11339 skb->len, hlen, skb_headlen(skb),
11340 skb_shinfo(skb)->gso_size);
11341
ca00392c 11342 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11343
755735eb 11344 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11345 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11346 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11347
11348 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11349 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11350 pbd->tcp_flags = pbd_tcp_flags(skb);
11351
11352 if (xmit_type & XMIT_GSO_V4) {
11353 pbd->ip_id = swab16(ip_hdr(skb)->id);
11354 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11355 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11356 ip_hdr(skb)->daddr,
11357 0, IPPROTO_TCP, 0));
755735eb
EG
11358
11359 } else
11360 pbd->tcp_pseudo_csum =
11361 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11362 &ipv6_hdr(skb)->daddr,
11363 0, IPPROTO_TCP, 0));
11364
a2fbb9ea
ET
11365 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11366 }
ca00392c 11367 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11368
755735eb
EG
11369 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11370 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11371
755735eb 11372 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11373 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11374 if (total_pkt_bd == NULL)
11375 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11376
755735eb
EG
11377 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11378 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11379
ca00392c
EG
11380 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11381 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11382 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11383 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11384
755735eb 11385 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11386 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11387 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11388 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11389 }
11390
ca00392c 11391 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11392
a2fbb9ea
ET
11393 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11394
755735eb 11395 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11396 * if the packet contains or ends with it
11397 */
11398 if (TX_BD_POFF(bd_prod) < nbd)
11399 nbd++;
11400
ca00392c
EG
11401 if (total_pkt_bd != NULL)
11402 total_pkt_bd->total_pkt_bytes = pkt_size;
11403
a2fbb9ea
ET
11404 if (pbd)
11405 DP(NETIF_MSG_TX_QUEUED,
11406 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11407 " tcp_flags %x xsum %x seq %u hlen %u\n",
11408 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11409 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11410 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11411
755735eb 11412 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11413
58f4c4cf
EG
11414 /*
11415 * Make sure that the BD data is updated before updating the producer
11416 * since FW might read the BD right after the producer is updated.
11417 * This is only applicable for weakly-ordered memory model archs such
11418 * as IA-64. The following barrier is also mandatory since the FW
11419 * assumes packets must have BDs.
11420 */
11421 wmb();
11422
ca00392c
EG
11423 fp->tx_db.data.prod += nbd;
11424 barrier();
54b9ddaa 11425 DOORBELL(bp, fp->index, fp->tx_db.raw);
a2fbb9ea
ET
11426
11427 mmiowb();
11428
755735eb 11429 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11430
11431 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11432 netif_tx_stop_queue(txq);
9baddeb8
SG
11433
11434 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
11435 * ordering of set_bit() in netif_tx_stop_queue() and read of
11436 * fp->tx_bd_cons */
58f4c4cf 11437 smp_mb();
9baddeb8 11438
54b9ddaa 11439 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 11440 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11441 netif_tx_wake_queue(txq);
a2fbb9ea 11442 }
54b9ddaa 11443 fp->tx_pkt++;
a2fbb9ea
ET
11444
11445 return NETDEV_TX_OK;
11446}
11447
bb2a0f7a 11448/* called with rtnl_lock */
a2fbb9ea
ET
11449static int bnx2x_open(struct net_device *dev)
11450{
11451 struct bnx2x *bp = netdev_priv(dev);
11452
6eccabb3
EG
11453 netif_carrier_off(dev);
11454
a2fbb9ea
ET
11455 bnx2x_set_power_state(bp, PCI_D0);
11456
bb2a0f7a 11457 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11458}
11459
bb2a0f7a 11460/* called with rtnl_lock */
a2fbb9ea
ET
11461static int bnx2x_close(struct net_device *dev)
11462{
a2fbb9ea
ET
11463 struct bnx2x *bp = netdev_priv(dev);
11464
11465 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11466 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11467 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11468 if (!CHIP_REV_IS_SLOW(bp))
11469 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11470
11471 return 0;
11472}
11473
f5372251 11474/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11475static void bnx2x_set_rx_mode(struct net_device *dev)
11476{
11477 struct bnx2x *bp = netdev_priv(dev);
11478 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11479 int port = BP_PORT(bp);
11480
11481 if (bp->state != BNX2X_STATE_OPEN) {
11482 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11483 return;
11484 }
11485
11486 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11487
11488 if (dev->flags & IFF_PROMISC)
11489 rx_mode = BNX2X_RX_MODE_PROMISC;
11490
11491 else if ((dev->flags & IFF_ALLMULTI) ||
4cd24eaf
JP
11492 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11493 CHIP_IS_E1(bp)))
34f80b04
EG
11494 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11495
11496 else { /* some multicasts */
11497 if (CHIP_IS_E1(bp)) {
11498 int i, old, offset;
11499 struct dev_mc_list *mclist;
11500 struct mac_configuration_cmd *config =
11501 bnx2x_sp(bp, mcast_config);
11502
0ddf477b
JP
11503 i = 0;
11504 netdev_for_each_mc_addr(mclist, dev) {
34f80b04
EG
11505 config->config_table[i].
11506 cam_entry.msb_mac_addr =
11507 swab16(*(u16 *)&mclist->dmi_addr[0]);
11508 config->config_table[i].
11509 cam_entry.middle_mac_addr =
11510 swab16(*(u16 *)&mclist->dmi_addr[2]);
11511 config->config_table[i].
11512 cam_entry.lsb_mac_addr =
11513 swab16(*(u16 *)&mclist->dmi_addr[4]);
11514 config->config_table[i].cam_entry.flags =
11515 cpu_to_le16(port);
11516 config->config_table[i].
11517 target_table_entry.flags = 0;
ca00392c
EG
11518 config->config_table[i].target_table_entry.
11519 clients_bit_vector =
11520 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11521 config->config_table[i].
11522 target_table_entry.vlan_id = 0;
11523
11524 DP(NETIF_MSG_IFUP,
11525 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11526 config->config_table[i].
11527 cam_entry.msb_mac_addr,
11528 config->config_table[i].
11529 cam_entry.middle_mac_addr,
11530 config->config_table[i].
11531 cam_entry.lsb_mac_addr);
0ddf477b 11532 i++;
34f80b04 11533 }
8d9c5f34 11534 old = config->hdr.length;
34f80b04
EG
11535 if (old > i) {
11536 for (; i < old; i++) {
11537 if (CAM_IS_INVALID(config->
11538 config_table[i])) {
af246401 11539 /* already invalidated */
34f80b04
EG
11540 break;
11541 }
11542 /* invalidate */
11543 CAM_INVALIDATE(config->
11544 config_table[i]);
11545 }
11546 }
11547
11548 if (CHIP_REV_IS_SLOW(bp))
11549 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11550 else
11551 offset = BNX2X_MAX_MULTICAST*(1 + port);
11552
8d9c5f34 11553 config->hdr.length = i;
34f80b04 11554 config->hdr.offset = offset;
8d9c5f34 11555 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11556 config->hdr.reserved1 = 0;
11557
e665bfda
MC
11558 bp->set_mac_pending++;
11559 smp_wmb();
11560
34f80b04
EG
11561 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11562 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11563 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11564 0);
11565 } else { /* E1H */
11566 /* Accept one or more multicasts */
11567 struct dev_mc_list *mclist;
11568 u32 mc_filter[MC_HASH_SIZE];
11569 u32 crc, bit, regidx;
11570 int i;
11571
11572 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11573
0ddf477b 11574 netdev_for_each_mc_addr(mclist, dev) {
7c510e4b
JB
11575 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11576 mclist->dmi_addr);
34f80b04
EG
11577
11578 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11579 bit = (crc >> 24) & 0xff;
11580 regidx = bit >> 5;
11581 bit &= 0x1f;
11582 mc_filter[regidx] |= (1 << bit);
11583 }
11584
11585 for (i = 0; i < MC_HASH_SIZE; i++)
11586 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11587 mc_filter[i]);
11588 }
11589 }
11590
11591 bp->rx_mode = rx_mode;
11592 bnx2x_set_storm_rx_mode(bp);
11593}
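/* Illustrative sketch (not part of the driver): the E1H multicast hash
 * computed above. The top byte of the little-endian CRC32C of the MAC
 * address picks one bit out of a 256-bit table that is split across
 * MC_HASH_SIZE 32-bit registers.
 */
static void __maybe_unused bnx2x_mc_hash_set(const u8 *mac, u32 *mc_filter)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}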
11594
11595/* called with rtnl_lock */
a2fbb9ea
ET
11596static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11597{
11598 struct sockaddr *addr = p;
11599 struct bnx2x *bp = netdev_priv(dev);
11600
34f80b04 11601 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11602 return -EINVAL;
11603
11604 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11605 if (netif_running(dev)) {
11606 if (CHIP_IS_E1(bp))
e665bfda 11607 bnx2x_set_eth_mac_addr_e1(bp, 1);
34f80b04 11608 else
e665bfda 11609 bnx2x_set_eth_mac_addr_e1h(bp, 1);
34f80b04 11610 }
a2fbb9ea
ET
11611
11612 return 0;
11613}
11614
c18487ee 11615/* called with rtnl_lock */
01cd4528
EG
11616static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11617 int devad, u16 addr)
a2fbb9ea 11618{
01cd4528
EG
11619 struct bnx2x *bp = netdev_priv(netdev);
11620 u16 value;
11621 int rc;
11622 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11623
01cd4528
EG
11624 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11625 prtad, devad, addr);
a2fbb9ea 11626
01cd4528
EG
11627 if (prtad != bp->mdio.prtad) {
11628 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11629 prtad, bp->mdio.prtad);
11630 return -EINVAL;
11631 }
11632
11633 /* The HW expects different devad if CL22 is used */
11634 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11635
01cd4528
EG
11636 bnx2x_acquire_phy_lock(bp);
11637 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11638 devad, addr, &value);
11639 bnx2x_release_phy_lock(bp);
11640 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11641
01cd4528
EG
11642 if (!rc)
11643 rc = value;
11644 return rc;
11645}
a2fbb9ea 11646
01cd4528
EG
11647/* called with rtnl_lock */
11648static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11649 u16 addr, u16 value)
11650{
11651 struct bnx2x *bp = netdev_priv(netdev);
11652 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11653 int rc;
11654
11655 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11656 " value 0x%x\n", prtad, devad, addr, value);
11657
11658 if (prtad != bp->mdio.prtad) {
11659 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11660 prtad, bp->mdio.prtad);
11661 return -EINVAL;
a2fbb9ea
ET
11662 }
11663
01cd4528
EG
11664 /* The HW expects different devad if CL22 is used */
11665 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11666
01cd4528
EG
11667 bnx2x_acquire_phy_lock(bp);
11668 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11669 devad, addr, value);
11670 bnx2x_release_phy_lock(bp);
11671 return rc;
11672}
c18487ee 11673
01cd4528
EG
11674/* called with rtnl_lock */
11675static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11676{
11677 struct bnx2x *bp = netdev_priv(dev);
11678 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11679
01cd4528
EG
11680 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11681 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11682
01cd4528
EG
11683 if (!netif_running(dev))
11684 return -EAGAIN;
11685
11686 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11687}
11688
34f80b04 11689/* called with rtnl_lock */
a2fbb9ea
ET
11690static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11691{
11692 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11693 int rc = 0;
a2fbb9ea
ET
11694
11695 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11696 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11697 return -EINVAL;
11698
11699 /* This does not race with packet allocation
c14423fe 11700 * because the actual alloc size is
a2fbb9ea
ET
11701 * only updated as part of load
11702 */
11703 dev->mtu = new_mtu;
11704
11705 if (netif_running(dev)) {
34f80b04
EG
11706 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11707 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11708 }
34f80b04
EG
11709
11710 return rc;
a2fbb9ea
ET
11711}
11712
11713static void bnx2x_tx_timeout(struct net_device *dev)
11714{
11715 struct bnx2x *bp = netdev_priv(dev);
11716
11717#ifdef BNX2X_STOP_ON_ERROR
11718 if (!bp->panic)
11719 bnx2x_panic();
11720#endif
11721 /* This allows the netif to be shut down gracefully before resetting */
11722 schedule_work(&bp->reset_task);
11723}
11724
11725#ifdef BCM_VLAN
34f80b04 11726/* called with rtnl_lock */
a2fbb9ea
ET
11727static void bnx2x_vlan_rx_register(struct net_device *dev,
11728 struct vlan_group *vlgrp)
11729{
11730 struct bnx2x *bp = netdev_priv(dev);
11731
11732 bp->vlgrp = vlgrp;
0c6671b0
EG
11733
11734 /* Set flags according to the required capabilities */
11735 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11736
11737 if (dev->features & NETIF_F_HW_VLAN_TX)
11738 bp->flags |= HW_VLAN_TX_FLAG;
11739
11740 if (dev->features & NETIF_F_HW_VLAN_RX)
11741 bp->flags |= HW_VLAN_RX_FLAG;
11742
a2fbb9ea 11743 if (netif_running(dev))
49d66772 11744 bnx2x_set_client_config(bp);
a2fbb9ea 11745}
34f80b04 11746
a2fbb9ea
ET
11747#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			pr_err("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			pr_err("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
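
/*
 * Worked example (illustration only, not driver code; the byte values
 * below are invented to show the unpacking):
 *
 *	u8 entry[8] = { 0x02, 0x00, 0x10, 0x40, 0x00, 0x00, 0x00, 0x01 };
 *	struct raw_op op;
 *
 *	bnx2x_prep_ops(entry, (u8 *)&op, sizeof(entry));
 *	// op.op == 0x02, op.offset == 0x001040, op.raw_data == 0x00000001
 */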

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
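
/*
 * Usage sketch, for reference (mirrors the first call in
 * bnx2x_init_firmware() below):
 *
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 *
 * reads fw_hdr->init_data.len, kmalloc()s bp->init_data of that size and
 * fills it by converting the init_data section of the firmware blob from
 * big endian with be32_to_cpu_n(); on allocation failure it jumps to the
 * request_firmware_exit label.
 */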

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
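
/*
 * Recovery flow, for reference: after a PCI channel error the EEH/AER
 * core invokes .error_detected (detach the netdev, unload the NIC and
 * request a slot reset), then .slot_reset once the link has been reset
 * (re-enable the device and restore its config space), and finally
 * .resume, which re-reads the MCP shared memory via bnx2x_eeh_recover()
 * and reloads the NIC if it was running.
 */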

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
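
/*
 * Numeric sketch (values invented for illustration): with
 * max_kwqe_pending = 8 and cnic_spq_pending = 8, a completion count of 3
 * drops cnic_spq_pending to 5, so up to three queued entries are moved
 * from the cnic_kwq ring onto the slow-path queue before the producer
 * is updated.
 */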

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */