/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
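
/* Note on the two helpers above: both go through the GRC window that the
 * device exposes in PCI config space - the target GRC address is written to
 * PCICFG_GRC_ADDRESS and the data then moves through PCICFG_GRC_DATA. The
 * trailing write of PCICFG_VENDOR_ID_OFFSET appears to park the window back
 * on a harmless config-space offset, so a stray access cannot land in device
 * memory.
 */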

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
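
/* A rough sketch of the DMAE write flow above: lengths and GRC destination
 * addresses are expressed in 32-bit words (hence dst_addr >> 2 and len32),
 * the command is serialized by bp->dmae_mutex, and completion is detected by
 * polling the wb_comp word in the slowpath area, which the DMAE engine
 * overwrites with DMAE_COMP_VAL. After ~200 polls we give up and log a
 * timeout rather than block forever.
 */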

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
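
/* Chunking example: with DMAE_LEN32_WR_MAX of, say, 0x400 dwords, a request
 * of len = 0x900 dwords is issued as 0x400 + 0x400 + 0x100. Note that len
 * counts dwords while offset advances in bytes, which is why offset grows
 * by DMAE_LEN32_WR_MAX * 4 per iteration.
 */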

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
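
/* Each of the four storm processors (X/T/C/U) keeps an assert list in its
 * internal memory; an entry is four u32 rows, and the scan stops at the
 * first entry whose row0 still holds COMMON_ASM_INVALID_ASSERT_OPCODE.
 * The return value is the number of asserts found across all storms.
 */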

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
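
/* The MCP scratchpad is dumped as two windows around "mark" (read from
 * scratch offset 0xf104 and rounded up to a dword boundary): first from
 * mark to the end of the buffer, then from 0xF108 back up to mark -
 * presumably so the circular firmware log comes out in order.
 */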

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

648
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
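
/* Quiescing order matters here: bumping intr_sem first makes every ISR bail
 * out early, synchronize_irq() then waits for handlers already in flight on
 * each vector, and only afterwards is the slowpath work cancelled and
 * flushed, so nothing can re-arm an interrupt behind our back.
 */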

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
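
/* Acking a status block is a single u32 write to the HC command register:
 * "index" tells the IGU how far the driver has consumed the status block,
 * "op" selects the interrupt-mode side effect (e.g. IGU_INT_ENABLE vs
 * IGU_INT_DISABLE), and "update" controls whether the IGU latches the new
 * index. The mmiowb()/barrier() pair keeps the ACK ordered against later
 * MMIO and compiler reordering.
 */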

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
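
/* BD accounting sketch for one transmitted packet, as unwound above:
 *
 *	start BD (mapped) -> parse BD (no mapping)
 *	  [-> TSO split header BD (no mapping, BNX2X_TSO_SPLIT_BD)]
 *	  -> one data BD per fragment (mapped)
 *
 * nbd starts at tx_start_bd->nbd - 1 and only the mapped BDs are unmapped;
 * new_cons = first_bd + nbd is the consumer index just past this packet.
 */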

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
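
/* Availability arithmetic: SUB_S16(prod, cons) is the wrap-safe count of BDs
 * in flight, and the NUM_TX_RINGS bias reserves the "next page" BD of every
 * ring page so it is never handed out. E.g. with prod == cons (idle ring)
 * the function reports tx_ring_size - NUM_TX_RINGS free BDs.
 */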

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
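
/* The sge_mask is a bitmap with one bit per SGE entry, grouped into u64
 * elements; a set bit means the page is still posted. TPA completions clear
 * bits out of order, and bnx2x_update_sge_prod() advances rx_sge_prod only
 * over fully-cleared u64 elements (re-setting them to all ones as it goes),
 * while the two "next page" slots at the end of each ring page are kept
 * permanently cleared so they never stall the producer.
 */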

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
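
/* Setting skb_shinfo(skb)->gso_size above is what keeps TPA-aggregated
 * packets forwardable: if the skb is routed back out, the stack can
 * re-segment it to at most one SGE page per segment instead of having to
 * drop an oversized frame.
 */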

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1505
a2fbb9ea
ET
1506static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1507{
1508 struct bnx2x *bp = fp->bp;
34f80b04 1509 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1510 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1511 int rx_pkt = 0;
1512
1513#ifdef BNX2X_STOP_ON_ERROR
1514 if (unlikely(bp->panic))
1515 return 0;
1516#endif
1517
34f80b04
EG
1518 /* CQ "next element" is of the size of the regular element,
1519 that's why it's ok here */
a2fbb9ea
ET
1520 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1521 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1522 hw_comp_cons++;
1523
1524 bd_cons = fp->rx_bd_cons;
1525 bd_prod = fp->rx_bd_prod;
34f80b04 1526 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1527 sw_comp_cons = fp->rx_comp_cons;
1528 sw_comp_prod = fp->rx_comp_prod;
1529
1530 /* Memory barrier necessary as speculative reads of the rx
1531 * buffer can be ahead of the index in the status block
1532 */
1533 rmb();
1534
1535 DP(NETIF_MSG_RX_STATUS,
1536 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1537 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1538
1539 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1540 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1541 struct sk_buff *skb;
1542 union eth_rx_cqe *cqe;
34f80b04
EG
1543 u8 cqe_fp_flags;
1544 u16 len, pad;
a2fbb9ea
ET
1545
1546 comp_ring_cons = RCQ_BD(sw_comp_cons);
1547 bd_prod = RX_BD(bd_prod);
1548 bd_cons = RX_BD(bd_cons);
1549
619e7a66
EG
1550 /* Prefetch the page containing the BD descriptor
1551 at producer's index. It will be needed when new skb is
1552 allocated */
1553 prefetch((void *)(PAGE_ALIGN((unsigned long)
1554 (&fp->rx_desc_ring[bd_prod])) -
1555 PAGE_SIZE + 1));
1556
a2fbb9ea 1557 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1558 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1559
a2fbb9ea 1560 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1561 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1562 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1563 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1564 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1565 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1566
1567 /* is this a slowpath msg? */
34f80b04 1568 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1569 bnx2x_sp_event(fp, cqe);
1570 goto next_cqe;
1571
1572 /* this is an rx packet */
1573 } else {
1574 rx_buf = &fp->rx_buf_ring[bd_cons];
1575 skb = rx_buf->skb;
54b9ddaa
VZ
1576 prefetch(skb);
1577 prefetch((u8 *)skb + 256);
a2fbb9ea
ET
1578 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1579 pad = cqe->fast_path_cqe.placement_offset;
1580
7a9b2557
VZ
1581 /* If CQE is marked both TPA_START and TPA_END
1582 it is a non-TPA CQE */
1583 if ((!fp->disable_tpa) &&
1584 (TPA_TYPE(cqe_fp_flags) !=
1585 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1586 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1587
1588 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1589 DP(NETIF_MSG_RX_STATUS,
1590 "calling tpa_start on queue %d\n",
1591 queue);
1592
1593 bnx2x_tpa_start(fp, queue, skb,
1594 bd_cons, bd_prod);
1595 goto next_rx;
1596 }
1597
1598 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1599 DP(NETIF_MSG_RX_STATUS,
1600 "calling tpa_stop on queue %d\n",
1601 queue);
1602
1603 if (!BNX2X_RX_SUM_FIX(cqe))
1604 BNX2X_ERR("STOP on none TCP "
1605 "data\n");
1606
1607 /* This is a size of the linear data
1608 on this skb */
1609 len = le16_to_cpu(cqe->fast_path_cqe.
1610 len_on_bd);
1611 bnx2x_tpa_stop(bp, fp, queue, pad,
1612 len, cqe, comp_ring_cons);
1613#ifdef BNX2X_STOP_ON_ERROR
1614 if (bp->panic)
17cb4006 1615 return 0;
7a9b2557
VZ
1616#endif
1617
1618 bnx2x_update_sge_prod(fp,
1619 &cqe->fast_path_cqe);
1620 goto next_cqe;
1621 }
1622 }
1623
1a983142
FT
1624 dma_sync_single_for_device(&bp->pdev->dev,
1625 dma_unmap_addr(rx_buf, mapping),
1626 pad + RX_COPY_THRESH,
1627 DMA_FROM_DEVICE);
a2fbb9ea
ET
1628 prefetch(skb);
1629 prefetch(((char *)(skb)) + 128);
1630
1631 /* is this an error packet? */
34f80b04 1632 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1633 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1634 "ERROR flags %x rx packet %u\n",
1635 cqe_fp_flags, sw_comp_cons);
de832a55 1636 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1637 goto reuse_rx;
1638 }
1639
1640 /* Since we don't have a jumbo ring
1641 * copy small packets if mtu > 1500
1642 */
1643 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1644 (len <= RX_COPY_THRESH)) {
1645 struct sk_buff *new_skb;
1646
1647 new_skb = netdev_alloc_skb(bp->dev,
1648 len + pad);
1649 if (new_skb == NULL) {
1650 DP(NETIF_MSG_RX_ERR,
34f80b04 1651 "ERROR packet dropped "
a2fbb9ea 1652 "because of alloc failure\n");
de832a55 1653 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1654 goto reuse_rx;
1655 }
1656
1657 /* aligned copy */
1658 skb_copy_from_linear_data_offset(skb, pad,
1659 new_skb->data + pad, len);
1660 skb_reserve(new_skb, pad);
1661 skb_put(new_skb, len);
1662
1663 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1664
1665 skb = new_skb;
1666
a119a069
EG
1667 } else
1668 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1a983142
FT
1669 dma_unmap_single(&bp->pdev->dev,
1670 dma_unmap_addr(rx_buf, mapping),
437cf2f1 1671 bp->rx_buf_size,
1a983142 1672 DMA_FROM_DEVICE);
a2fbb9ea
ET
1673 skb_reserve(skb, pad);
1674 skb_put(skb, len);
1675
1676 } else {
1677 DP(NETIF_MSG_RX_ERR,
34f80b04 1678 "ERROR packet dropped because "
a2fbb9ea 1679 "of alloc failure\n");
de832a55 1680 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1681reuse_rx:
1682 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1683 goto next_rx;
1684 }
1685
1686 skb->protocol = eth_type_trans(skb, bp->dev);
1687
1688 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1689 if (bp->rx_csum) {
1adcd8be
EG
1690 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1691 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1692 else
de832a55 1693 fp->eth_q_stats.hw_csum_err++;
66e855f3 1694 }
a2fbb9ea
ET
1695 }
1696
748e5439 1697 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1698
a2fbb9ea 1699#ifdef BCM_VLAN
0c6671b0 1700 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1701 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1702 PARSING_FLAGS_VLAN))
4fd89b7a
DK
1703 vlan_gro_receive(&fp->napi, bp->vlgrp,
1704 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
a2fbb9ea
ET
1705 else
1706#endif
4fd89b7a 1707 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1708
a2fbb9ea
ET
1709
1710next_rx:
1711 rx_buf->skb = NULL;
1712
1713 bd_cons = NEXT_RX_IDX(bd_cons);
1714 bd_prod = NEXT_RX_IDX(bd_prod);
1715 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1716 rx_pkt++;
1717next_cqe:
1718 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1719 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1720
34f80b04 1721 if (rx_pkt == budget)
1722 break;
1723 } /* while */
1724
1725 fp->rx_bd_cons = bd_cons;
34f80b04 1726 fp->rx_bd_prod = bd_prod_fw;
1727 fp->rx_comp_cons = sw_comp_cons;
1728 fp->rx_comp_prod = sw_comp_prod;
1729
1730 /* Update producers */
1731 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1732 fp->rx_sge_prod);
1733
1734 fp->rx_pkt += rx_pkt;
1735 fp->rx_calls++;
1736
1737 return rx_pkt;
1738}
1739
1740static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1741{
1742 struct bnx2x_fastpath *fp = fp_cookie;
1743 struct bnx2x *bp = fp->bp;
a2fbb9ea 1744
1745 /* Return here if interrupt is disabled */
1746 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1747 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1748 return IRQ_HANDLED;
1749 }
1750
34f80b04 1751 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1752 fp->index, fp->sb_id);
0626b899 1753 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1754
1755#ifdef BNX2X_STOP_ON_ERROR
1756 if (unlikely(bp->panic))
1757 return IRQ_HANDLED;
1758#endif
ca00392c 1759
1760 /* Handle Rx and Tx according to MSI-X vector */
1761 prefetch(fp->rx_cons_sb);
1762 prefetch(fp->tx_cons_sb);
1763 prefetch(&fp->status_blk->u_status_block.status_block_index);
1764 prefetch(&fp->status_blk->c_status_block.status_block_index);
1765 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1766
1767 return IRQ_HANDLED;
1768}
1769
1770static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1771{
555f6c78 1772 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1773 u16 status = bnx2x_ack_int(bp);
34f80b04 1774 u16 mask;
ca00392c 1775 int i;
a2fbb9ea 1776
34f80b04 1777 /* Return here if interrupt is shared and it's not for us */
1778 if (unlikely(status == 0)) {
1779 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1780 return IRQ_NONE;
1781 }
f5372251 1782 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1783
34f80b04 1784 /* Return here if interrupt is disabled */
1785 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1786 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1787 return IRQ_HANDLED;
1788 }
1789
1790#ifdef BNX2X_STOP_ON_ERROR
1791 if (unlikely(bp->panic))
1792 return IRQ_HANDLED;
1793#endif
1794
1795 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1796 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1797
1798 mask = 0x2 << fp->sb_id;
1799 if (status & mask) {
1800 /* Handle Rx and Tx according to SB id */
1801 prefetch(fp->rx_cons_sb);
1802 prefetch(&fp->status_blk->u_status_block.
1803 status_block_index);
1804 prefetch(fp->tx_cons_sb);
1805 prefetch(&fp->status_blk->c_status_block.
1806 status_block_index);
1807 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1808 status &= ~mask;
1809 }
1810 }
1811
1812#ifdef BCM_CNIC
1813 mask = 0x2 << CNIC_SB_ID(bp);
1814 if (status & (mask | 0x1)) {
1815 struct cnic_ops *c_ops = NULL;
1816
1817 rcu_read_lock();
1818 c_ops = rcu_dereference(bp->cnic_ops);
1819 if (c_ops)
1820 c_ops->cnic_handler(bp->cnic_data, NULL);
1821 rcu_read_unlock();
1822
1823 status &= ~mask;
1824 }
1825#endif
a2fbb9ea 1826
34f80b04 1827 if (unlikely(status & 0x1)) {
1cf167f2 1828 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1829
1830 status &= ~0x1;
1831 if (!status)
1832 return IRQ_HANDLED;
1833 }
1834
1835 if (status)
1836 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1837 status);
a2fbb9ea 1838
c18487ee 1839 return IRQ_HANDLED;
1840}
1841
c18487ee 1842/* end of fast path */
a2fbb9ea 1843
bb2a0f7a 1844static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1845
1846/* Link */
1847
1848/*
1849 * General service functions
1850 */
a2fbb9ea 1851
4a37fb66 1852static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1853{
1854 u32 lock_status;
1855 u32 resource_bit = (1 << resource);
1856 int func = BP_FUNC(bp);
1857 u32 hw_lock_control_reg;
c18487ee 1858 int cnt;
a2fbb9ea 1859
1860 /* Validating that the resource is within range */
1861 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1862 DP(NETIF_MSG_HW,
1863 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1864 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1865 return -EINVAL;
1866 }
a2fbb9ea 1867
1868 if (func <= 5) {
1869 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1870 } else {
1871 hw_lock_control_reg =
1872 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1873 }
1874
c18487ee 1875 /* Validating that the resource is not already taken */
4a37fb66 1876 lock_status = REG_RD(bp, hw_lock_control_reg);
1877 if (lock_status & resource_bit) {
1878 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1879 lock_status, resource_bit);
1880 return -EEXIST;
1881 }
a2fbb9ea 1882
1883 /* Try for 5 seconds every 5ms */
1884 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1885 /* Try to acquire the lock */
1886 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1887 lock_status = REG_RD(bp, hw_lock_control_reg);
1888 if (lock_status & resource_bit)
1889 return 0;
a2fbb9ea 1890
c18487ee 1891 msleep(5);
a2fbb9ea 1892 }
1893 DP(NETIF_MSG_HW, "Timeout\n");
1894 return -EAGAIN;
1895}
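/*
 * Editor's sketch (not driver code): the acquire path above requests the
 * lock by writing the resource bit to hw_lock_control_reg + 4 and confirms
 * ownership by reading it back, retrying every 5 ms for up to 5 seconds.
 * Below, lock_reg, reg_rd() and reg_wr_set() are hypothetical stand-ins
 * for the register and the REG_RD()/REG_WR() accessors; this model always
 * grants the request, which real hardware does only when the lock is free.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t lock_reg;	/* models hw_lock_control_reg */

static uint32_t reg_rd(void) { return lock_reg; }
static void reg_wr_set(uint32_t bit) { lock_reg |= bit; }

static int acquire_lock(uint32_t resource)
{
	uint32_t resource_bit = 1u << resource;
	int cnt;

	if (reg_rd() & resource_bit)
		return -1;			/* already taken (-EEXIST) */

	for (cnt = 0; cnt < 1000; cnt++) {	/* 1000 tries * 5 ms = 5 s */
		reg_wr_set(resource_bit);	/* try to acquire */
		if (reg_rd() & resource_bit)
			return 0;		/* granted */
		/* the real code sleeps 5 ms here: msleep(5) */
	}
	return -2;				/* timeout (-EAGAIN) */
}

int main(void)
{
	printf("acquire: %d\n", acquire_lock(3));	/* prints 0 */
	return 0;
}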
a2fbb9ea 1896
4a37fb66 1897static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1898{
1899 u32 lock_status;
1900 u32 resource_bit = (1 << resource);
1901 int func = BP_FUNC(bp);
1902 u32 hw_lock_control_reg;
a2fbb9ea 1903
1904 /* Validating that the resource is within range */
1905 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1906 DP(NETIF_MSG_HW,
1907 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1908 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1909 return -EINVAL;
1910 }
1911
1912 if (func <= 5) {
1913 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1914 } else {
1915 hw_lock_control_reg =
1916 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1917 }
1918
c18487ee 1919 /* Validating that the resource is currently taken */
4a37fb66 1920 lock_status = REG_RD(bp, hw_lock_control_reg);
1921 if (!(lock_status & resource_bit)) {
1922 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1923 lock_status, resource_bit);
1924 return -EFAULT;
1925 }
1926
4a37fb66 1927 REG_WR(bp, hw_lock_control_reg, resource_bit);
1928 return 0;
1929}
1930
1931/* HW Lock for shared dual port PHYs */
4a37fb66 1932static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1933{
34f80b04 1934 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1935
1936 if (bp->port.need_hw_lock)
1937 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1938}
a2fbb9ea 1939
4a37fb66 1940static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1941{
1942 if (bp->port.need_hw_lock)
1943 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1944
34f80b04 1945 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1946}
a2fbb9ea 1947
1948int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1949{
1950 /* The GPIO should be swapped if swap register is set and active */
1951 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1952 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1953 int gpio_shift = gpio_num +
1954 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1955 u32 gpio_mask = (1 << gpio_shift);
1956 u32 gpio_reg;
1957 int value;
1958
1959 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1960 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1961 return -EINVAL;
1962 }
1963
1964 /* read GPIO value */
1965 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1966
1967 /* get the requested pin value */
1968 if ((gpio_reg & gpio_mask) == gpio_mask)
1969 value = 1;
1970 else
1971 value = 0;
1972
1973 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1974
1975 return value;
1976}
1977
17de50b7 1978int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1979{
1980 /* The GPIO should be swapped if swap register is set and active */
1981 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1982 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1983 int gpio_shift = gpio_num +
1984 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1985 u32 gpio_mask = (1 << gpio_shift);
1986 u32 gpio_reg;
a2fbb9ea 1987
1988 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1989 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1990 return -EINVAL;
1991 }
a2fbb9ea 1992
4a37fb66 1993 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1994 /* read GPIO and mask except the float bits */
1995 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1996
1997 switch (mode) {
1998 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1999 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2000 gpio_num, gpio_shift);
2001 /* clear FLOAT and set CLR */
2002 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2003 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2004 break;
a2fbb9ea 2005
2006 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2007 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2008 gpio_num, gpio_shift);
2009 /* clear FLOAT and set SET */
2010 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2011 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2012 break;
a2fbb9ea 2013
17de50b7 2014 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2015 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2016 gpio_num, gpio_shift);
2017 /* set FLOAT */
2018 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2019 break;
a2fbb9ea 2020
2021 default:
2022 break;
2023 }
2024
c18487ee 2025 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2026 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2027
c18487ee 2028 return 0;
2029}
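/*
 * Editor's sketch (not driver code): bnx2x_set_gpio() above is a classic
 * read-modify-write -- mask the register down to the FLOAT bits, clear
 * FLOAT for the pin, then set either SET or CLR. The bit positions below
 * are made-up placeholders, not the real MISC_REGISTERS_GPIO_* layout.
 */
#include <stdint.h>
#include <stdio.h>

#define GPIO_FLOAT_POS	24	/* assumed position, illustration only */
#define GPIO_SET_POS	8	/* assumed position, illustration only */

static uint32_t set_gpio_high(uint32_t gpio_reg, int gpio_shift)
{
	uint32_t gpio_mask = 1u << gpio_shift;

	gpio_reg &= ~(gpio_mask << GPIO_FLOAT_POS);	/* clear FLOAT */
	gpio_reg |= (gpio_mask << GPIO_SET_POS);	/* set SET */
	return gpio_reg;
}

int main(void)
{
	uint32_t reg = 0xfu << GPIO_FLOAT_POS;	/* all four pins floating */

	reg = set_gpio_high(reg, 2);		/* pin 2 -> output high */
	printf("gpio_reg = 0x%08x\n", reg);	/* 0x0b000400 */
	return 0;
}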
2030
2031int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2032{
2033 /* The GPIO should be swapped if swap register is set and active */
2034 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2035 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2036 int gpio_shift = gpio_num +
2037 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2038 u32 gpio_mask = (1 << gpio_shift);
2039 u32 gpio_reg;
2040
2041 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2042 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2043 return -EINVAL;
2044 }
2045
2046 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2047 /* read GPIO int */
2048 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2049
2050 switch (mode) {
2051 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2052 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2053 "output low\n", gpio_num, gpio_shift);
2054 /* clear SET and set CLR */
2055 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2056 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2057 break;
2058
2059 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2060 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2061 "output high\n", gpio_num, gpio_shift);
2062 /* clear CLR and set SET */
2063 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2064 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065 break;
2066
2067 default:
2068 break;
2069 }
2070
2071 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2072 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2073
2074 return 0;
2075}
2076
c18487ee 2077static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2078{
2079 u32 spio_mask = (1 << spio_num);
2080 u32 spio_reg;
a2fbb9ea 2081
2082 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2083 (spio_num > MISC_REGISTERS_SPIO_7)) {
2084 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2085 return -EINVAL;
2086 }
2087
4a37fb66 2088 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2089 /* read SPIO and mask except the float bits */
2090 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2091
c18487ee 2092 switch (mode) {
6378c025 2093 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2094 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2095 /* clear FLOAT and set CLR */
2096 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2097 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2098 break;
a2fbb9ea 2099
6378c025 2100 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2101 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2102 /* clear FLOAT and set SET */
2103 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2104 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2105 break;
a2fbb9ea 2106
2107 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2108 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2109 /* set FLOAT */
2110 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2111 break;
a2fbb9ea 2112
2113 default:
2114 break;
2115 }
2116
c18487ee 2117 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2118 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2119
2120 return 0;
2121}
2122
c18487ee 2123static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2124{
2125 switch (bp->link_vars.ieee_fc &
2126 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2127 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2128 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2129 ADVERTISED_Pause);
2130 break;
356e2385 2131
c18487ee 2132 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2133 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2134 ADVERTISED_Pause);
2135 break;
356e2385 2136
c18487ee 2137 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2138 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2139 break;
356e2385 2140
c18487ee 2141 default:
34f80b04 2142 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2143 ADVERTISED_Pause);
2144 break;
2145 }
2146}
f1410647 2147
2148static void bnx2x_link_report(struct bnx2x *bp)
2149{
f34d28ea 2150 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2151 netif_carrier_off(bp->dev);
7995c64e 2152 netdev_err(bp->dev, "NIC Link is Down\n");
2153 return;
2154 }
2155
c18487ee 2156 if (bp->link_vars.link_up) {
2157 u16 line_speed;
2158
2159 if (bp->state == BNX2X_STATE_OPEN)
2160 netif_carrier_on(bp->dev);
7995c64e 2161 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2162
2163 line_speed = bp->link_vars.line_speed;
2164 if (IS_E1HMF(bp)) {
2165 u16 vn_max_rate;
2166
2167 vn_max_rate =
2168 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2169 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2170 if (vn_max_rate < line_speed)
2171 line_speed = vn_max_rate;
2172 }
7995c64e 2173 pr_cont("%d Mbps ", line_speed);
f1410647 2174
c18487ee 2175 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2176 pr_cont("full duplex");
c18487ee 2177 else
7995c64e 2178 pr_cont("half duplex");
f1410647 2179
2180 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2181 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2182 pr_cont(", receive ");
2183 if (bp->link_vars.flow_ctrl &
2184 BNX2X_FLOW_CTRL_TX)
7995c64e 2185 pr_cont("& transmit ");
c18487ee 2186 } else {
7995c64e 2187 pr_cont(", transmit ");
c18487ee 2188 }
7995c64e 2189 pr_cont("flow control ON");
c18487ee 2190 }
7995c64e 2191 pr_cont("\n");
f1410647 2192
2193 } else { /* link_down */
2194 netif_carrier_off(bp->dev);
7995c64e 2195 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2196 }
2197}
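/*
 * Editor's worked example (not driver code): in E1HMF mode the reported
 * speed above is clamped by the per-function MAX_BW field of mf_config,
 * which is stored in units of 100 Mbps. The mask and shift below are
 * assumed placeholders for FUNC_MF_CFG_MAX_BW_MASK/_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_BW_MASK	0xffff0000u	/* assumed */
#define MAX_BW_SHIFT	16		/* assumed */

int main(void)
{
	uint32_t mf_config = 25u << MAX_BW_SHIFT;	/* MAX_BW = 25 */
	uint16_t line_speed = 10000;			/* 10G link */
	uint16_t vn_max_rate =
		((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

	if (vn_max_rate < line_speed)
		line_speed = vn_max_rate;
	printf("NIC Link is Up, %u Mbps\n", line_speed);  /* 2500 Mbps */
	return 0;
}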
2198
b5bf9068 2199static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2200{
2201 if (!BP_NOMCP(bp)) {
2202 u8 rc;
a2fbb9ea 2203
19680c48 2204 /* Initialize link parameters structure variables */
2205 /* It is recommended to turn off RX FC for jumbo frames
2206 for better performance */
0c593270 2207 if (bp->dev->mtu > 5000)
c0700f90 2208 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2209 else
c0700f90 2210 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2211
4a37fb66 2212 bnx2x_acquire_phy_lock(bp);
2213
2214 if (load_mode == LOAD_DIAG)
2215 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2216
19680c48 2217 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2218
4a37fb66 2219 bnx2x_release_phy_lock(bp);
a2fbb9ea 2220
2221 bnx2x_calc_fc_adv(bp);
2222
2223 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2224 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2225 bnx2x_link_report(bp);
b5bf9068 2226 }
34f80b04 2227
2228 return rc;
2229 }
f5372251 2230 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2231 return -EINVAL;
2232}
2233
c18487ee 2234static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2235{
19680c48 2236 if (!BP_NOMCP(bp)) {
4a37fb66 2237 bnx2x_acquire_phy_lock(bp);
19680c48 2238 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2239 bnx2x_release_phy_lock(bp);
a2fbb9ea 2240
2241 bnx2x_calc_fc_adv(bp);
2242 } else
f5372251 2243 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2244}
a2fbb9ea 2245
2246static void bnx2x__link_reset(struct bnx2x *bp)
2247{
19680c48 2248 if (!BP_NOMCP(bp)) {
4a37fb66 2249 bnx2x_acquire_phy_lock(bp);
589abe3a 2250 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2251 bnx2x_release_phy_lock(bp);
19680c48 2252 } else
f5372251 2253 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2254}
a2fbb9ea 2255
2256static u8 bnx2x_link_test(struct bnx2x *bp)
2257{
2258 u8 rc;
a2fbb9ea 2259
4a37fb66 2260 bnx2x_acquire_phy_lock(bp);
c18487ee 2261 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2262 bnx2x_release_phy_lock(bp);
a2fbb9ea 2263
2264 return rc;
2265}
a2fbb9ea 2266
8a1c38d1 2267static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2268{
2269 u32 r_param = bp->link_vars.line_speed / 8;
2270 u32 fair_periodic_timeout_usec;
2271 u32 t_fair;
34f80b04 2272
2273 memset(&(bp->cmng.rs_vars), 0,
2274 sizeof(struct rate_shaping_vars_per_port));
2275 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2276
2277 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2278 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2279
2280 /* this is the threshold below which no timer arming will occur
2281 the 1.25 coefficient makes the threshold a little bigger
2282 than the real time, to compensate for timer inaccuracy */
2283 bp->cmng.rs_vars.rs_threshold =
2284 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2285
2286 /* resolution of fairness timer */
2287 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2288 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2289 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2290
8a1c38d1
EG
2291 /* this is the threshold below which we won't arm the timer anymore */
2292 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2293
2294 /* we multiply by 1e3/8 to get bytes/msec.
2295 We don't want the credits to exceed the credit
2296 of t_fair*FAIR_MEM (the algorithm resolution) */
2297 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2298 /* since each tick is 4 usec */
2299 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2300}
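/*
 * Editor's sanity check (not driver code) of the timing math above at a
 * 10 Gbps line rate. RS_PERIODIC_TIMEOUT_USEC and T_FAIR_COEF are assumed
 * values chosen to match the comments ("100 usec ... = 25" and "for 10G
 * it is 1000usec"), not verified against the headers.
 */
#include <stdint.h>
#include <stdio.h>

#define RS_PERIODIC_TIMEOUT_USEC	100		/* assumed */
#define T_FAIR_COEF			10000000u	/* assumed */

int main(void)
{
	uint32_t line_speed = 10000;			/* Mbps */
	uint32_t r_param = line_speed / 8;		/* 1250 bytes/usec */
	uint32_t rs_ticks = RS_PERIODIC_TIMEOUT_USEC / 4;	/* 25 ticks */
	uint32_t rs_threshold =
		(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;	/* 1.25x */
	uint32_t t_fair = T_FAIR_COEF / line_speed;	/* 1000 usec */

	printf("ticks=%u threshold=%u t_fair=%u\n",
	       rs_ticks, rs_threshold, t_fair);	/* 25 156250 1000 */
	return 0;
}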
2301
2302/* Calculates the sum of vn_min_rates.
2303 It's needed for further normalizing of the min_rates.
2304 Returns:
2305 sum of vn_min_rates.
2306 or
2307 0 - if all the min_rates are 0.
2308 In the latter case the fairness algorithm should be deactivated.
2309 If not all min_rates are zero then those that are zeroes will be set to 1.
2310 */
2311static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2312{
2313 int all_zero = 1;
2314 int port = BP_PORT(bp);
2315 int vn;
2316
2317 bp->vn_weight_sum = 0;
2318 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2319 int func = 2*vn + port;
2320 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2321 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2322 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2323
2324 /* Skip hidden vns */
2325 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2326 continue;
2327
2328 /* If min rate is zero - set it to 1 */
2329 if (!vn_min_rate)
2330 vn_min_rate = DEF_MIN_RATE;
2331 else
2332 all_zero = 0;
2333
2334 bp->vn_weight_sum += vn_min_rate;
2335 }
2336
2337 /* ... only if all min rates are zeros - disable fairness */
2338 if (all_zero) {
2339 bp->cmng.flags.cmng_enables &=
2340 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2341 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2342 " fairness will be disabled\n");
2343 } else
2344 bp->cmng.flags.cmng_enables |=
2345 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2346}
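/*
 * Editor's model (not driver code) of the weight-sum pass above: zero
 * minimum rates are bumped to DEF_MIN_RATE so every visible vn carries
 * some weight, and fairness is disabled only if all of them were zero.
 * DEF_MIN_RATE = 100 is an assumption for illustration.
 */
#include <stdio.h>

#define DEF_MIN_RATE 100	/* assumed */

int main(void)
{
	int min_rate[4] = { 0, 2500, 0, 5000 };	/* per-vn config, Mbps */
	int sum = 0, all_zero = 1, vn;

	for (vn = 0; vn < 4; vn++) {
		int r = min_rate[vn];

		if (!r)
			r = DEF_MIN_RATE;	/* zero -> minimal weight */
		else
			all_zero = 0;
		sum += r;
	}
	printf("vn_weight_sum=%d fairness=%s\n",
	       sum, all_zero ? "off" : "on");	/* 7700, on */
	return 0;
}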
2347
8a1c38d1 2348static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2349{
2350 struct rate_shaping_vars_per_vn m_rs_vn;
2351 struct fairness_vars_per_vn m_fair_vn;
2352 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2353 u16 vn_min_rate, vn_max_rate;
2354 int i;
2355
2356 /* If function is hidden - set min and max to zeroes */
2357 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2358 vn_min_rate = 0;
2359 vn_max_rate = 0;
2360
2361 } else {
2362 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2363 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2364 /* If min rate is zero - set it to 1 */
2365 if (!vn_min_rate)
2366 vn_min_rate = DEF_MIN_RATE;
2367 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2368 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2369 }
8a1c38d1 2370 DP(NETIF_MSG_IFUP,
b015e3d1 2371 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2372 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2373
2374 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2375 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2376
2377 /* global vn counter - maximal Mbps for this vn */
2378 m_rs_vn.vn_counter.rate = vn_max_rate;
2379
2380 /* quota - number of bytes transmitted in this period */
2381 m_rs_vn.vn_counter.quota =
2382 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2383
8a1c38d1 2384 if (bp->vn_weight_sum) {
2385 /* credit for each period of the fairness algorithm:
2386 number of bytes in T_FAIR (the vns share the port rate).
2387 vn_weight_sum should not be larger than 10000, thus
2388 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2389 than zero */
34f80b04 2390 m_fair_vn.vn_credit_delta =
2391 max((u32)(vn_min_rate * (T_FAIR_COEF /
2392 (8 * bp->vn_weight_sum))),
2393 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2394 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2395 m_fair_vn.vn_credit_delta);
2396 }
2397
2398 /* Store it to internal memory */
2399 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2400 REG_WR(bp, BAR_XSTRORM_INTMEM +
2401 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2402 ((u32 *)(&m_rs_vn))[i]);
2403
2404 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2405 REG_WR(bp, BAR_XSTRORM_INTMEM +
2406 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2407 ((u32 *)(&m_fair_vn))[i]);
2408}
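/*
 * Editor's worked numbers (not driver code) for the two per-vn values
 * computed above: the rate-shaping quota (bytes per RS period) and the
 * fairness credit delta, floored at twice the fairness threshold. All
 * constants are assumptions carried over from the sketches above.
 */
#include <stdint.h>
#include <stdio.h>

#define RS_PERIODIC_TIMEOUT_USEC	100		/* assumed */
#define T_FAIR_COEF			10000000u	/* assumed */

int main(void)
{
	uint32_t vn_max_rate = 2500, vn_min_rate = 500;	/* Mbps */
	uint32_t vn_weight_sum = 7700;
	uint32_t fair_threshold = 8000;			/* assumed */

	uint32_t quota = (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
	uint32_t credit = vn_min_rate * (T_FAIR_COEF / (8 * vn_weight_sum));

	if (credit < 2 * fair_threshold)
		credit = 2 * fair_threshold;
	printf("quota=%u credit_delta=%u\n", quota, credit);
	return 0;
}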
2409
8a1c38d1 2410
2411/* This function is called upon link interrupt */
2412static void bnx2x_link_attn(struct bnx2x *bp)
2413{
2414 /* Make sure that we are synced with the current statistics */
2415 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2416
c18487ee 2417 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2418
2419 if (bp->link_vars.link_up) {
2420
1c06328c 2421 /* dropless flow control */
a18f5128 2422 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2423 int port = BP_PORT(bp);
2424 u32 pause_enabled = 0;
2425
2426 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2427 pause_enabled = 1;
2428
2429 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2430 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2431 pause_enabled);
2432 }
2433
2434 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2435 struct host_port_stats *pstats;
2436
2437 pstats = bnx2x_sp(bp, port_stats);
2438 /* reset old bmac stats */
2439 memset(&(pstats->mac_stx[0]), 0,
2440 sizeof(struct mac_stx));
2441 }
f34d28ea 2442 if (bp->state == BNX2X_STATE_OPEN)
2443 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2444 }
2445
2446 /* indicate link status */
2447 bnx2x_link_report(bp);
2448
2449 if (IS_E1HMF(bp)) {
8a1c38d1 2450 int port = BP_PORT(bp);
34f80b04 2451 int func;
8a1c38d1 2452 int vn;
34f80b04 2453
ab6ad5a4 2454 /* Set the attention towards other drivers on the same port */
2455 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2456 if (vn == BP_E1HVN(bp))
2457 continue;
2458
8a1c38d1 2459 func = ((vn << 1) | port);
2460 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2461 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2462 }
34f80b04 2463
2464 if (bp->link_vars.link_up) {
2465 int i;
2466
2467 /* Init rate shaping and fairness contexts */
2468 bnx2x_init_port_minmax(bp);
34f80b04 2469
34f80b04 2470 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2471 bnx2x_init_vn_minmax(bp, 2*vn + port);
2472
2473 /* Store it to internal memory */
2474 for (i = 0;
2475 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2476 REG_WR(bp, BAR_XSTRORM_INTMEM +
2477 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2478 ((u32 *)(&bp->cmng))[i]);
2479 }
34f80b04 2480 }
c18487ee 2481}
a2fbb9ea 2482
2483static void bnx2x__link_status_update(struct bnx2x *bp)
2484{
f34d28ea 2485 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2486 return;
a2fbb9ea 2487
c18487ee 2488 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2489
2490 if (bp->link_vars.link_up)
2491 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2492 else
2493 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2494
2495 bnx2x_calc_vn_weight_sum(bp);
2496
2497 /* indicate link status */
2498 bnx2x_link_report(bp);
a2fbb9ea 2499}
a2fbb9ea 2500
2501static void bnx2x_pmf_update(struct bnx2x *bp)
2502{
2503 int port = BP_PORT(bp);
2504 u32 val;
2505
2506 bp->port.pmf = 1;
2507 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2508
2509 /* enable nig attention */
2510 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2511 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2512 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2513
2514 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2515}
2516
c18487ee 2517/* end of Link */
2518
2519/* slow path */
2520
2521/*
2522 * General service functions
2523 */
2524
2525/* send the MCP a request, block until there is a reply */
2526u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2527{
2528 int func = BP_FUNC(bp);
2529 u32 seq = ++bp->fw_seq;
2530 u32 rc = 0;
2531 u32 cnt = 1;
2532 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2533
c4ff7cbf 2534 mutex_lock(&bp->fw_mb_mutex);
2535 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2536 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2537
2538 do {
2539 /* let the FW do its magic ... */
2540 msleep(delay);
2541
2542 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2543
2544 /* Give the FW up to 5 seconds (500*10ms) */
2545 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2546
2547 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2548 cnt*delay, rc, seq);
2549
2550 /* is this a reply to our command? */
2551 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2552 rc &= FW_MSG_CODE_MASK;
2553 else {
2554 /* FW BUG! */
2555 BNX2X_ERR("FW failed to respond!\n");
2556 bnx2x_fw_dump(bp);
2557 rc = 0;
2558 }
c4ff7cbf 2559 mutex_unlock(&bp->fw_mb_mutex);
2560
2561 return rc;
2562}
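/*
 * Editor's model (not driver code) of the mailbox handshake above: the
 * driver writes command|seq, polls the firmware header, and accepts the
 * reply only when the echoed sequence matches. SEQ_MASK/CODE_MASK are
 * placeholders for FW_MSG_SEQ_NUMBER_MASK/FW_MSG_CODE_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK	0x0000ffffu	/* assumed */
#define CODE_MASK	0xffff0000u	/* assumed */

static uint32_t fw_reply(uint32_t cmd_and_seq)
{
	/* firmware echoes the sequence and adds a response code */
	return 0x00010000u | (cmd_and_seq & SEQ_MASK);
}

int main(void)
{
	uint32_t seq = 42;
	uint32_t rc = fw_reply(0xa0000000u | seq);

	if (seq == (rc & SEQ_MASK))
		printf("reply code 0x%x\n", rc & CODE_MASK);
	else
		printf("FW failed to respond!\n");
	return 0;
}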
2563
2564static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2565static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2566static void bnx2x_set_rx_mode(struct net_device *dev);
2567
2568static void bnx2x_e1h_disable(struct bnx2x *bp)
2569{
2570 int port = BP_PORT(bp);
2571
2572 netif_tx_disable(bp->dev);
2573
2574 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2575
2576 netif_carrier_off(bp->dev);
2577}
2578
2579static void bnx2x_e1h_enable(struct bnx2x *bp)
2580{
2581 int port = BP_PORT(bp);
2582
2583 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2584
2585 /* Tx queue should only be re-enabled */
2586 netif_tx_wake_all_queues(bp->dev);
2587
2588 /*
2589 * Should not call netif_carrier_on since it will be called if the link
2590 * is up when checking for link state
2591 */
2592}
2593
2594static void bnx2x_update_min_max(struct bnx2x *bp)
2595{
2596 int port = BP_PORT(bp);
2597 int vn, i;
2598
2599 /* Init rate shaping and fairness contexts */
2600 bnx2x_init_port_minmax(bp);
2601
2602 bnx2x_calc_vn_weight_sum(bp);
2603
2604 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2605 bnx2x_init_vn_minmax(bp, 2*vn + port);
2606
2607 if (bp->port.pmf) {
2608 int func;
2609
2610 /* Set the attention towards other drivers on the same port */
2611 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2612 if (vn == BP_E1HVN(bp))
2613 continue;
2614
2615 func = ((vn << 1) | port);
2616 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2617 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2618 }
2619
2620 /* Store it to internal memory */
2621 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2622 REG_WR(bp, BAR_XSTRORM_INTMEM +
2623 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2624 ((u32 *)(&bp->cmng))[i]);
2625 }
2626}
2627
2628static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2629{
2691d51d 2630 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2631
2632 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2633
2634 /*
2635 * This is the only place besides the function initialization
2636 * where the bp->flags can change so it is done without any
2637 * locks
2638 */
2639 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2640 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2641 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2642
2643 bnx2x_e1h_disable(bp);
2644 } else {
2645 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2646 bp->flags &= ~MF_FUNC_DIS;
2647
2648 bnx2x_e1h_enable(bp);
2649 }
2650 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2651 }
2652 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2653
2654 bnx2x_update_min_max(bp);
2655 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2656 }
2657
2658 /* Report results to MCP */
2659 if (dcc_event)
2660 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2661 else
2662 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2663}
2664
2665/* must be called under the spq lock */
2666static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2667{
2668 struct eth_spe *next_spe = bp->spq_prod_bd;
2669
2670 if (bp->spq_prod_bd == bp->spq_last_bd) {
2671 bp->spq_prod_bd = bp->spq;
2672 bp->spq_prod_idx = 0;
2673 DP(NETIF_MSG_TIMER, "end of spq\n");
2674 } else {
2675 bp->spq_prod_bd++;
2676 bp->spq_prod_idx++;
2677 }
2678 return next_spe;
2679}
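/*
 * Editor's isolated model (not driver code) of the SPQ producer wrap in
 * bnx2x_sp_get_next(): the current BD is handed out, then the producer
 * pointer and index wrap to slot 0 once the last BD has been consumed.
 * In the driver this runs under spq_lock.
 */
#include <stdio.h>

#define SPQ_ENTRIES 8

struct spq {
	int bd[SPQ_ENTRIES];
	int prod_idx;			/* next free slot */
};

static int *spq_get_next(struct spq *q)
{
	int *next = &q->bd[q->prod_idx];

	if (q->prod_idx == SPQ_ENTRIES - 1)
		q->prod_idx = 0;	/* wrap to the first BD */
	else
		q->prod_idx++;
	return next;
}

int main(void)
{
	struct spq q = { .prod_idx = 6 };
	int i;

	for (i = 0; i < 4; i++)
		printf("slot %ld\n", (long)(spq_get_next(&q) - q.bd));
	return 0;			/* prints 6 7 0 1 */
}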
2680
2681/* must be called under the spq lock */
2682static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2683{
2684 int func = BP_FUNC(bp);
2685
2686 /* Make sure that BD data is updated before writing the producer */
2687 wmb();
2688
2689 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2690 bp->spq_prod_idx);
2691 mmiowb();
2692}
2693
2694/* the slow path queue is odd since completions arrive on the fastpath ring */
2695static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2696 u32 data_hi, u32 data_lo, int common)
2697{
28912902 2698 struct eth_spe *spe;
a2fbb9ea 2699
2700 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2701 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2702 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2703 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2704 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2705
2706#ifdef BNX2X_STOP_ON_ERROR
2707 if (unlikely(bp->panic))
2708 return -EIO;
2709#endif
2710
34f80b04 2711 spin_lock_bh(&bp->spq_lock);
2712
2713 if (!bp->spq_left) {
2714 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2715 spin_unlock_bh(&bp->spq_lock);
2716 bnx2x_panic();
2717 return -EBUSY;
2718 }
f1410647 2719
2720 spe = bnx2x_sp_get_next(bp);
2721
a2fbb9ea 2722 /* CID needs port number to be encoded in it */
28912902 2723 spe->hdr.conn_and_cmd_data =
2724 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2725 HW_CID(bp, cid)));
28912902 2726 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2727 if (common)
28912902 2728 spe->hdr.type |=
2729 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2730
2731 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2732 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2733
2734 bp->spq_left--;
2735
28912902 2736 bnx2x_sp_prod_update(bp);
34f80b04 2737 spin_unlock_bh(&bp->spq_lock);
2738 return 0;
2739}
2740
2741/* acquire split MCP access lock register */
4a37fb66 2742static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2743{
a2fbb9ea 2744 u32 i, j, val;
34f80b04 2745 int rc = 0;
2746
2747 might_sleep();
2748 i = 100;
2749 for (j = 0; j < i*10; j++) {
2750 val = (1UL << 31);
2751 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2752 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2753 if (val & (1L << 31))
2754 break;
2755
2756 msleep(5);
2757 }
a2fbb9ea 2758 if (!(val & (1L << 31))) {
19680c48 2759 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2760 rc = -EBUSY;
2761 }
2762
2763 return rc;
2764}
2765
2766/* release split MCP access lock register */
2767static void bnx2x_release_alr(struct bnx2x *bp)
2768{
2769 u32 val = 0;
2770
2771 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2772}
2773
2774static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2775{
2776 struct host_def_status_block *def_sb = bp->def_status_blk;
2777 u16 rc = 0;
2778
2779 barrier(); /* status block is written to by the chip */
2780 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2781 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2782 rc |= 1;
2783 }
2784 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2785 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2786 rc |= 2;
2787 }
2788 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2789 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2790 rc |= 4;
2791 }
2792 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2793 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2794 rc |= 8;
2795 }
2796 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2797 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2798 rc |= 16;
2799 }
2800 return rc;
2801}
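/*
 * Editor's demonstration (not driver code) of the return-value convention
 * above: each changed status-block index contributes one bit, so the
 * slow-path task can test (status & 0x1) to see whether the attention
 * index moved before acking the individual blocks.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rc = 0;
	int attn_changed = 1, c_changed = 0, u_changed = 1;

	if (attn_changed)
		rc |= 1;	/* attention index moved */
	if (c_changed)
		rc |= 2;
	if (u_changed)
		rc |= 4;

	if (rc & 0x1)
		printf("handle HW attentions (rc=0x%x)\n", rc); /* 0x5 */
	return 0;
}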
2802
2803/*
2804 * slow path service functions
2805 */
2806
2807static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2808{
34f80b04 2809 int port = BP_PORT(bp);
2810 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2811 COMMAND_REG_ATTN_BITS_SET);
2812 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2813 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2814 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2815 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2816 u32 aeu_mask;
87942b46 2817 u32 nig_mask = 0;
a2fbb9ea 2818
2819 if (bp->attn_state & asserted)
2820 BNX2X_ERR("IGU ERROR\n");
2821
2822 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2823 aeu_mask = REG_RD(bp, aeu_addr);
2824
a2fbb9ea 2825 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2826 aeu_mask, asserted);
2827 aeu_mask &= ~(asserted & 0xff);
2828 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2829
2830 REG_WR(bp, aeu_addr, aeu_mask);
2831 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2832
3fcaf2e5 2833 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2834 bp->attn_state |= asserted;
3fcaf2e5 2835 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2836
2837 if (asserted & ATTN_HARD_WIRED_MASK) {
2838 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2839
2840 bnx2x_acquire_phy_lock(bp);
2841
877e9aa4 2842 /* save nig interrupt mask */
87942b46 2843 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2844 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2845
c18487ee 2846 bnx2x_link_attn(bp);
2847
2848 /* handle unicore attn? */
2849 }
2850 if (asserted & ATTN_SW_TIMER_4_FUNC)
2851 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2852
2853 if (asserted & GPIO_2_FUNC)
2854 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2855
2856 if (asserted & GPIO_3_FUNC)
2857 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2858
2859 if (asserted & GPIO_4_FUNC)
2860 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2861
2862 if (port == 0) {
2863 if (asserted & ATTN_GENERAL_ATTN_1) {
2864 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2865 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2866 }
2867 if (asserted & ATTN_GENERAL_ATTN_2) {
2868 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2869 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2870 }
2871 if (asserted & ATTN_GENERAL_ATTN_3) {
2872 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2873 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2874 }
2875 } else {
2876 if (asserted & ATTN_GENERAL_ATTN_4) {
2877 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2878 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2879 }
2880 if (asserted & ATTN_GENERAL_ATTN_5) {
2881 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2882 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2883 }
2884 if (asserted & ATTN_GENERAL_ATTN_6) {
2885 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2886 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2887 }
2888 }
2889
2890 } /* if hardwired */
2891
2892 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2893 asserted, hc_addr);
2894 REG_WR(bp, hc_addr, asserted);
2895
2896 /* now set back the mask */
a5e9a7cf 2897 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2898 REG_WR(bp, nig_int_mask_addr, nig_mask);
2899 bnx2x_release_phy_lock(bp);
2900 }
2901}
2902
2903static inline void bnx2x_fan_failure(struct bnx2x *bp)
2904{
2905 int port = BP_PORT(bp);
2906
2907 /* mark the failure */
2908 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2909 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2910 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2911 bp->link_params.ext_phy_config);
2912
2913 /* log the failure */
2914 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
2915 "Please contact Dell Support for assistance.\n");
fd4ef40d 2916}
ab6ad5a4 2917
877e9aa4 2918static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2919{
34f80b04 2920 int port = BP_PORT(bp);
877e9aa4 2921 int reg_offset;
4d295db0 2922 u32 val, swap_val, swap_override;
877e9aa4 2923
2924 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2925 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2926
34f80b04 2927 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2928
2929 val = REG_RD(bp, reg_offset);
2930 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2931 REG_WR(bp, reg_offset, val);
2932
2933 BNX2X_ERR("SPIO5 hw attention\n");
2934
fd4ef40d 2935 /* Fan failure attention */
2936 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2937 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2938 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2939 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2940 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2941 /* The PHY reset is controlled by GPIO 1 */
2942 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944 break;
2945
2946 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2947 /* The PHY reset is controlled by GPIO 1 */
2948 /* fake the port number to cancel the swap done in
2949 set_gpio() */
2950 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2951 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2952 port = (swap_val && swap_override) ^ 1;
2953 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2954 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2955 break;
2956
2957 default:
2958 break;
2959 }
fd4ef40d 2960 bnx2x_fan_failure(bp);
877e9aa4 2961 }
34f80b04 2962
589abe3a
EG
2963 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2964 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2965 bnx2x_acquire_phy_lock(bp);
2966 bnx2x_handle_module_detect_int(&bp->link_params);
2967 bnx2x_release_phy_lock(bp);
2968 }
2969
2970 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2971
2972 val = REG_RD(bp, reg_offset);
2973 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2974 REG_WR(bp, reg_offset, val);
2975
2976 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2977 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2978 bnx2x_panic();
2979 }
2980}
2981
2982static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2983{
2984 u32 val;
2985
0626b899 2986 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2987
2988 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2989 BNX2X_ERR("DB hw attention 0x%x\n", val);
2990 /* DORQ discard attention */
2991 if (val & 0x2)
2992 BNX2X_ERR("FATAL error from DORQ\n");
2993 }
2994
2995 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2996
2997 int port = BP_PORT(bp);
2998 int reg_offset;
2999
3000 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3001 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3002
3003 val = REG_RD(bp, reg_offset);
3004 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3005 REG_WR(bp, reg_offset, val);
3006
3007 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3008 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3009 bnx2x_panic();
3010 }
3011}
3012
3013static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3014{
3015 u32 val;
3016
3017 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3018
3019 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3020 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3021 /* CFC error attention */
3022 if (val & 0x2)
3023 BNX2X_ERR("FATAL error from CFC\n");
3024 }
3025
3026 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3027
3028 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3029 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3030 /* RQ_USDMDP_FIFO_OVERFLOW */
3031 if (val & 0x18000)
3032 BNX2X_ERR("FATAL error from PXP\n");
3033 }
3034
3035 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3036
3037 int port = BP_PORT(bp);
3038 int reg_offset;
3039
3040 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3041 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3042
3043 val = REG_RD(bp, reg_offset);
3044 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3045 REG_WR(bp, reg_offset, val);
3046
3047 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3048 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3049 bnx2x_panic();
3050 }
3051}
3052
3053static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3054{
3055 u32 val;
3056
3057 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3058
3059 if (attn & BNX2X_PMF_LINK_ASSERT) {
3060 int func = BP_FUNC(bp);
3061
3062 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3063 bp->mf_config = SHMEM_RD(bp,
3064 mf_cfg.func_mf_config[func].config);
3065 val = SHMEM_RD(bp, func_mb[func].drv_status);
3066 if (val & DRV_STATUS_DCC_EVENT_MASK)
3067 bnx2x_dcc_event(bp,
3068 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3069 bnx2x__link_status_update(bp);
2691d51d 3070 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3071 bnx2x_pmf_update(bp);
3072
3073 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3074
3075 BNX2X_ERR("MC assert!\n");
3076 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3077 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3079 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3080 bnx2x_panic();
3081
3082 } else if (attn & BNX2X_MCP_ASSERT) {
3083
3084 BNX2X_ERR("MCP assert!\n");
3085 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3086 bnx2x_fw_dump(bp);
3087
3088 } else
3089 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3090 }
3091
3092 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3093 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3094 if (attn & BNX2X_GRC_TIMEOUT) {
3095 val = CHIP_IS_E1H(bp) ?
3096 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3097 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3098 }
3099 if (attn & BNX2X_GRC_RSV) {
3100 val = CHIP_IS_E1H(bp) ?
3101 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3102 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3103 }
877e9aa4 3104 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3105 }
3106}
3107
3108static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3109{
3110 struct attn_route attn;
3111 struct attn_route group_mask;
34f80b04 3112 int port = BP_PORT(bp);
877e9aa4 3113 int index;
a2fbb9ea
ET
3114 u32 reg_addr;
3115 u32 val;
3fcaf2e5 3116 u32 aeu_mask;
a2fbb9ea
ET
3117
3118 /* need to take HW lock because MCP or other port might also
3119 try to handle this event */
4a37fb66 3120 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3121
3122 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3123 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3124 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3125 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3126 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3127 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3128
3129 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3130 if (deasserted & (1 << index)) {
3131 group_mask = bp->attn_group[index];
3132
3133 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3134 index, group_mask.sig[0], group_mask.sig[1],
3135 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3136
3137 bnx2x_attn_int_deasserted3(bp,
3138 attn.sig[3] & group_mask.sig[3]);
3139 bnx2x_attn_int_deasserted1(bp,
3140 attn.sig[1] & group_mask.sig[1]);
3141 bnx2x_attn_int_deasserted2(bp,
3142 attn.sig[2] & group_mask.sig[2]);
3143 bnx2x_attn_int_deasserted0(bp,
3144 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3145
3146 if ((attn.sig[0] & group_mask.sig[0] &
3147 HW_PRTY_ASSERT_SET_0) ||
3148 (attn.sig[1] & group_mask.sig[1] &
3149 HW_PRTY_ASSERT_SET_1) ||
3150 (attn.sig[2] & group_mask.sig[2] &
3151 HW_PRTY_ASSERT_SET_2))
6378c025 3152 BNX2X_ERR("FATAL HW block parity attention\n");
3153 }
3154 }
3155
4a37fb66 3156 bnx2x_release_alr(bp);
a2fbb9ea 3157
5c862848 3158 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3159
3160 val = ~deasserted;
3161 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3162 val, reg_addr);
5c862848 3163 REG_WR(bp, reg_addr, val);
a2fbb9ea 3164
a2fbb9ea 3165 if (~bp->attn_state & deasserted)
3fcaf2e5 3166 BNX2X_ERR("IGU ERROR\n");
3167
3168 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3169 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3170
3171 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3172 aeu_mask = REG_RD(bp, reg_addr);
3173
3174 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3175 aeu_mask, deasserted);
3176 aeu_mask |= (deasserted & 0xff);
3177 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3178
3179 REG_WR(bp, reg_addr, aeu_mask);
3180 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3181
3182 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3183 bp->attn_state &= ~deasserted;
3184 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3185}
3186
3187static void bnx2x_attn_int(struct bnx2x *bp)
3188{
3189 /* read local copy of bits */
3190 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3191 attn_bits);
3192 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3193 attn_bits_ack);
3194 u32 attn_state = bp->attn_state;
3195
3196 /* look for changed bits */
3197 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3198 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3199
3200 DP(NETIF_MSG_HW,
3201 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3202 attn_bits, attn_ack, asserted, deasserted);
3203
3204 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3205 BNX2X_ERR("BAD attention state\n");
3206
3207 /* handle bits that were raised */
3208 if (asserted)
3209 bnx2x_attn_int_asserted(bp, asserted);
3210
3211 if (deasserted)
3212 bnx2x_attn_int_deasserted(bp, deasserted);
3213}
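/*
 * Editor's worked example (not driver code) of the bit algebra above: a
 * bit is newly asserted when set in attn_bits but in neither attn_ack nor
 * attn_state, and newly deasserted in the inverse case.
 */
#include <stdio.h>

int main(void)
{
	unsigned int attn_bits  = 0x3;	/* what the HW reports now */
	unsigned int attn_ack   = 0x9;	/* what was acknowledged */
	unsigned int attn_state = 0x9;	/* what the driver tracks */

	unsigned int asserted   =  attn_bits & ~attn_ack & ~attn_state;
	unsigned int deasserted = ~attn_bits &  attn_ack &  attn_state;

	printf("asserted=0x%x deasserted=0x%x\n", asserted, deasserted);
	return 0;	/* asserted=0x2 deasserted=0x8 */
}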
3214
3215static void bnx2x_sp_task(struct work_struct *work)
3216{
1cf167f2 3217 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3218 u16 status;
3219
34f80b04 3220
3221 /* Return here if interrupt is disabled */
3222 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3223 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3224 return;
3225 }
3226
3227 status = bnx2x_update_dsb_idx(bp);
3228/* if (status == 0) */
3229/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3230
3196a88a 3231 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3232
3233 /* HW attentions */
3234 if (status & 0x1)
a2fbb9ea 3235 bnx2x_attn_int(bp);
a2fbb9ea 3236
68d59484 3237 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3238 IGU_INT_NOP, 1);
3239 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3240 IGU_INT_NOP, 1);
3241 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3242 IGU_INT_NOP, 1);
3243 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3244 IGU_INT_NOP, 1);
3245 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3246 IGU_INT_ENABLE, 1);
877e9aa4 3247
3248}
3249
3250static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3251{
3252 struct net_device *dev = dev_instance;
3253 struct bnx2x *bp = netdev_priv(dev);
3254
3255 /* Return here if interrupt is disabled */
3256 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3257 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3258 return IRQ_HANDLED;
3259 }
3260
8d9c5f34 3261 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3262
3263#ifdef BNX2X_STOP_ON_ERROR
3264 if (unlikely(bp->panic))
3265 return IRQ_HANDLED;
3266#endif
3267
3268#ifdef BCM_CNIC
3269 {
3270 struct cnic_ops *c_ops;
3271
3272 rcu_read_lock();
3273 c_ops = rcu_dereference(bp->cnic_ops);
3274 if (c_ops)
3275 c_ops->cnic_handler(bp->cnic_data, NULL);
3276 rcu_read_unlock();
3277 }
3278#endif
1cf167f2 3279 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3280
3281 return IRQ_HANDLED;
3282}
3283
3284/* end of slow path */
3285
3286/* Statistics */
3287
3288/****************************************************************************
3289* Macros
3290****************************************************************************/
3291
3292/* sum[hi:lo] += add[hi:lo] */
3293#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3294 do { \
3295 s_lo += a_lo; \
f5ba6772 3296 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3297 } while (0)
3298
3299/* difference = minuend - subtrahend */
3300#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3301 do { \
3302 if (m_lo < s_lo) { \
3303 /* underflow */ \
a2fbb9ea 3304 d_hi = m_hi - s_hi; \
bb2a0f7a 3305 if (d_hi > 0) { \
6378c025 3306 /* we can 'loan' 1 */ \
3307 d_hi--; \
3308 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3309 } else { \
6378c025 3310 /* m_hi <= s_hi */ \
3311 d_hi = 0; \
3312 d_lo = 0; \
3313 } \
3314 } else { \
3315 /* m_lo >= s_lo */ \
a2fbb9ea 3316 if (m_hi < s_hi) { \
3317 d_hi = 0; \
3318 d_lo = 0; \
3319 } else { \
6378c025 3320 /* m_hi >= s_hi */ \
3321 d_hi = m_hi - s_hi; \
3322 d_lo = m_lo - s_lo; \
3323 } \
3324 } \
3325 } while (0)
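/*
 * Editor's self-contained check (not driver code) of the hi/lo arithmetic
 * that ADD_64 and DIFF_64 implement: the add carries into the high word
 * when the low word wraps, and the diff 'loans' 1 from the high word on
 * low-word underflow, clamping to zero when the minuend is smaller.
 */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* ADD_64: low word wraps, carry into high word */
	uint32_t s_hi = 0, s_lo = 0xffffffffu, a_hi = 0, a_lo = 2;

	s_lo += a_lo;
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
	printf("sum  = %u:%u\n", s_hi, s_lo);		/* 1:1 */

	/* DIFF_64 borrow case: m_lo < s_lo but m_hi > s_hi */
	uint32_t m_hi = 2, m_lo = 5, sub_hi = 1, sub_lo = 9;
	uint32_t d_hi, d_lo;

	if (m_lo < sub_lo) {
		d_hi = m_hi - sub_hi;
		if (d_hi > 0) {
			d_hi--;		/* 'loan' 1 from the high word */
			d_lo = m_lo + (UINT_MAX - sub_lo) + 1;
		} else {
			d_hi = 0;
			d_lo = 0;	/* minuend smaller: clamp */
		}
	} else {
		d_hi = m_hi - sub_hi;
		d_lo = m_lo - sub_lo;
	}
	printf("diff = %u:%u\n", d_hi, d_lo);	/* 0:4294967292 */
	return 0;
}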
3326
bb2a0f7a 3327#define UPDATE_STAT64(s, t) \
a2fbb9ea 3328 do { \
3329 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3330 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3331 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3332 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3333 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3334 pstats->mac_stx[1].t##_lo, diff.lo); \
3335 } while (0)
3336
bb2a0f7a 3337#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3338 do { \
3339 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3340 diff.lo, new->s##_lo, old->s##_lo); \
3341 ADD_64(estats->t##_hi, diff.hi, \
3342 estats->t##_lo, diff.lo); \
3343 } while (0)
3344
3345/* sum[hi:lo] += add */
3346#define ADD_EXTEND_64(s_hi, s_lo, a) \
3347 do { \
3348 s_lo += a; \
3349 s_hi += (s_lo < a) ? 1 : 0; \
3350 } while (0)
3351
bb2a0f7a 3352#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3353 do { \
3354 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3355 pstats->mac_stx[1].s##_lo, \
3356 new->s); \
3357 } while (0)
3358
bb2a0f7a 3359#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3360 do { \
3361 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3362 old_tclient->s = tclient->s; \
3363 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3364 } while (0)
3365
3366#define UPDATE_EXTEND_USTAT(s, t) \
3367 do { \
3368 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3369 old_uclient->s = uclient->s; \
3370 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3371 } while (0)
3372
3373#define UPDATE_EXTEND_XSTAT(s, t) \
3374 do { \
3375 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3376 old_xclient->s = xclient->s; \
3377 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3378 } while (0)
3379
3380/* minuend -= subtrahend */
3381#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3382 do { \
3383 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3384 } while (0)
3385
3386/* minuend[hi:lo] -= subtrahend */
3387#define SUB_EXTEND_64(m_hi, m_lo, s) \
3388 do { \
3389 SUB_64(m_hi, 0, m_lo, s); \
3390 } while (0)
3391
3392#define SUB_EXTEND_USTAT(s, t) \
3393 do { \
3394 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3395 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3396 } while (0)
3397
3398/*
3399 * General service functions
3400 */
3401
3402static inline long bnx2x_hilo(u32 *hiref)
3403{
3404 u32 lo = *(hiref + 1);
3405#if (BITS_PER_LONG == 64)
3406 u32 hi = *hiref;
3407
3408 return HILO_U64(hi, lo);
3409#else
3410 return lo;
3411#endif
3412}
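/*
 * Editor's note (not driver code): on 64-bit builds bnx2x_hilo() returns
 * the full HILO_U64 value; on 32-bit builds it truncates to the low word.
 * The 64-bit path is equivalent to this explicit shift-or:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)hilo_u64(1, 2)); /* 4294967298 */
	return 0;
}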
3413
3414/*
3415 * Init service functions
3416 */
3417
3418static void bnx2x_storm_stats_post(struct bnx2x *bp)
3419{
3420 if (!bp->stats_pending) {
3421 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3422 int i, rc;
bb2a0f7a
YG
3423
3424 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3425 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3426 for_each_queue(bp, i)
3427 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3428
3429 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3430 ((u32 *)&ramrod_data)[1],
3431 ((u32 *)&ramrod_data)[0], 0);
3432 if (rc == 0) {
3433 /* stats ramrod has it's own slot on the spq */
3434 bp->spq_left++;
3435 bp->stats_pending = 1;
3436 }
3437 }
3438}
3439
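/*
 * bnx2x_hw_stats_post kicks off the DMAE commands queued in the slowpath
 * area.  When several commands were prepared (executer_idx != 0), a
 * "loader" command is built whose job is to copy the first queued command
 * into the DMAE command memory; each queued command then apparently
 * triggers the next through its GO-register completion, and the final
 * command writes DMAE_COMP_VAL to stats_comp.
 */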
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

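/*
 * bnx2x_stats_pmf_update runs when this function takes over as PMF: it
 * reads the port statistics left behind in shared memory (e.g. by the
 * previous PMF) back into the host buffer.  The copy is split in two
 * because a single DMAE read is limited to DMAE_LEN32_RD_MAX dwords.
 */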
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

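/*
 * bnx2x_port_stats_init only *builds* the DMAE command chain for a full
 * statistics cycle: host copies to the MCP port/function mailboxes, a
 * readback of the active MAC (BMAC or EMAC) counters, and the NIG
 * counters.  The chain is executed later by bnx2x_hw_stats_post; only the
 * last command completes to stats_comp with DMAE_COMP_VAL.
 */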
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

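/*
 * In bnx2x_bmac_stats_update the first macro argument is the BigMAC
 * hardware counter name (e.g. rx_stat_grerb) and the second is the
 * generic name it is accounted under in mac_stx (e.g.
 * rx_stat_ifhcinbadoctets).  Note that rx_stat_grxpf and tx_stat_gtxpf
 * are each folded into two generic counters.
 */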
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

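/*
 * bnx2x_hw_stats_update merges the freshly DMAEd counters: MAC counters
 * through the per-MAC helpers above, then the NIG discard/truncate and
 * egress packet counters as deltas against the old_nig_stats snapshot.
 * Bumping host_port_stats_end past host_port_stats_start presumably marks
 * the shared-memory copy as consistent for the MCP reader.
 */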
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

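/*
 * Storm statistics are accepted only if each storm's per-client
 * stats_counter is exactly one behind the driver's bp->stats_counter,
 * i.e. the firmware has answered the most recent STAT_QUERY ramrod.  A
 * stale counter makes the whole update be retried on the next cycle.
 */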
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

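/*
 * bnx2x_net_stats_update maps the accumulated eth_stats into the kernel's
 * net_device_stats.  Via bnx2x_hilo, counters wider than a 32-bit long
 * are truncated on 32-bit hosts.
 */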
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = bp->fp;
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		netdev_printk(KERN_DEBUG, bp->dev, "\n");
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

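/*
 * Statistics state machine: bnx2x_stats_stm[state][event] gives the
 * action to run and the next state.  For example, in STATS_STATE_ENABLED
 * an UPDATE event runs bnx2x_stats_update and stays in ENABLED, while a
 * STOP event runs bnx2x_stats_stop and moves to STATS_STATE_DISABLED.
 */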
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

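/*
 * The periodic timer below also feeds the driver/MCP heartbeat: the
 * driver advances drv_pulse_mb and expects the MCP's mcp_pulse_mb to
 * trail by at most one.  E.g. a driver sequence of 5 is fine against an
 * MCP response of 5 (already answered) or 4 (answer pending); anything
 * else logs a lost heartbeat.
 */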
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

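/*
 * Both the USTORM and CSTORM views of a fastpath status block live in
 * CSTORM memory on this firmware (note bnx2x_zero_sb clears both through
 * CSEM_REG_FAST_MEMORY), which is apparently why the init code below
 * programs only BAR_CSTRORM_INTMEM offsets for the U and C sections.
 */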
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

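/*
 * When releasing the TPA pool, only bins in BNX2X_TPA_START state hold a
 * live DMA mapping (an aggregation was in progress), so only those are
 * unmapped before the skb is freed.
 */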
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

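/*
 * Each RX ring below is a set of pages chained through "next page"
 * elements: the last entries of page i (e.g. slot RX_DESC_CNT * i - 2 of
 * the BD ring) are initialized to point at the bus address of page
 * (i % NUM_..._RINGS), so the final page links back to the first.
 */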
a2fbb9ea
ET
4953static void bnx2x_init_rx_rings(struct bnx2x *bp)
4954{
7a9b2557 4955 int func = BP_FUNC(bp);
32626230
EG
4956 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4957 ETH_MAX_AGGREGATION_QUEUES_E1H;
4958 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4959 int i, j;
a2fbb9ea 4960
87942b46 4961 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4962 DP(NETIF_MSG_IFUP,
4963 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4964
7a9b2557 4965 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4966
54b9ddaa 4967 for_each_queue(bp, j) {
32626230 4968 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4969
32626230 4970 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4971 fp->tpa_pool[i].skb =
4972 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4973 if (!fp->tpa_pool[i].skb) {
4974 BNX2X_ERR("Failed to allocate TPA "
4975 "skb pool for queue[%d] - "
4976 "disabling TPA on this "
4977 "queue!\n", j);
4978 bnx2x_free_tpa_pool(bp, fp, i);
4979 fp->disable_tpa = 1;
4980 break;
4981 }
1a983142 4982 dma_unmap_addr_set(&fp->tpa_pool[i],
7a9b2557
VZ
 4983 mapping, 0);
4985 fp->tpa_state[i] = BNX2X_TPA_STOP;
4986 }
4987 }
4988 }
4989
54b9ddaa 4990 for_each_queue(bp, j) {
a2fbb9ea
ET
4991 struct bnx2x_fastpath *fp = &bp->fp[j];
4992
4993 fp->rx_bd_cons = 0;
4994 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4995 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4996
4997 /* "next page" elements initialization */
4998 /* SGE ring */
4999 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5000 struct eth_rx_sge *sge;
5001
5002 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5003 sge->addr_hi =
5004 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5005 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5006 sge->addr_lo =
5007 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5008 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5009 }
5010
5011 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 5012
7a9b2557 5013 /* RX BD ring */
a2fbb9ea
ET
5014 for (i = 1; i <= NUM_RX_RINGS; i++) {
5015 struct eth_rx_bd *rx_bd;
5016
5017 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5018 rx_bd->addr_hi =
5019 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 5020 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5021 rx_bd->addr_lo =
5022 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 5023 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
5024 }
5025
34f80b04 5026 /* CQ ring */
a2fbb9ea
ET
5027 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5028 struct eth_rx_cqe_next_page *nextpg;
5029
5030 nextpg = (struct eth_rx_cqe_next_page *)
5031 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5032 nextpg->addr_hi =
5033 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 5034 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5035 nextpg->addr_lo =
5036 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 5037 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
5038 }
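/* The three loops above chain the ring pages into one circular ring:
 * on pass i the last usable element of page i-1 is pointed at page
 * (i % NUM_*_RINGS).  E.g. with NUM_RX_RINGS == 2, pass 1 makes
 * page 0 point at page 1 and pass 2 makes page 1 wrap back to page 0,
 * so the hardware walks all pages as a single ring.
 */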
5039
7a9b2557
VZ
5040 /* Allocate SGEs and initialize the ring elements */
5041 for (i = 0, ring_prod = 0;
5042 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 5043
7a9b2557
VZ
5044 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5045 BNX2X_ERR("was only able to allocate "
5046 "%d rx sges\n", i);
5047 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5048 /* Cleanup already allocated elements */
5049 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5050 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5051 fp->disable_tpa = 1;
5052 ring_prod = 0;
5053 break;
5054 }
5055 ring_prod = NEXT_SGE_IDX(ring_prod);
5056 }
5057 fp->rx_sge_prod = ring_prod;
5058
5059 /* Allocate BDs and initialize BD ring */
66e855f3 5060 fp->rx_comp_cons = 0;
7a9b2557 5061 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5062 for (i = 0; i < bp->rx_ring_size; i++) {
5063 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5064 BNX2X_ERR("was only able to allocate "
de832a55
EG
5065 "%d rx skbs on queue[%d]\n", i, j);
5066 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5067 break;
5068 }
5069 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5070 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5071 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5072 }
5073
7a9b2557
VZ
5074 fp->rx_bd_prod = ring_prod;
5075 /* must not have more available CQEs than BDs */
5076 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5077 cqe_ring_prod);
a2fbb9ea
ET
5078 fp->rx_pkt = fp->rx_calls = 0;
5079
7a9b2557
VZ
5080 /* Warning!
5081 * this will generate an interrupt (to the TSTORM)
 5082 * and must only be done after the chip is initialized
5083 */
5084 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5085 fp->rx_sge_prod);
a2fbb9ea
ET
5086 if (j != 0)
5087 continue;
5088
5089 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5090 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5091 U64_LO(fp->rx_comp_mapping));
5092 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5093 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5094 U64_HI(fp->rx_comp_mapping));
5095 }
5096}
5097
5098static void bnx2x_init_tx_ring(struct bnx2x *bp)
5099{
5100 int i, j;
5101
54b9ddaa 5102 for_each_queue(bp, j) {
a2fbb9ea
ET
5103 struct bnx2x_fastpath *fp = &bp->fp[j];
5104
5105 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5106 struct eth_tx_next_bd *tx_next_bd =
5107 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5108
ca00392c 5109 tx_next_bd->addr_hi =
a2fbb9ea 5110 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5111 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5112 tx_next_bd->addr_lo =
a2fbb9ea 5113 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5114 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5115 }
5116
ca00392c
EG
5117 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5118 fp->tx_db.data.zero_fill1 = 0;
5119 fp->tx_db.data.prod = 0;
5120
a2fbb9ea
ET
5121 fp->tx_pkt_prod = 0;
5122 fp->tx_pkt_cons = 0;
5123 fp->tx_bd_prod = 0;
5124 fp->tx_bd_cons = 0;
5125 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5126 fp->tx_pkt = 0;
5127 }
5128}
5129
5130static void bnx2x_init_sp_ring(struct bnx2x *bp)
5131{
34f80b04 5132 int func = BP_FUNC(bp);
a2fbb9ea
ET
5133
5134 spin_lock_init(&bp->spq_lock);
5135
5136 bp->spq_left = MAX_SPQ_PENDING;
5137 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5138 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5139 bp->spq_prod_bd = bp->spq;
5140 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5141
34f80b04 5142 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5143 U64_LO(bp->spq_mapping));
34f80b04
EG
5144 REG_WR(bp,
5145 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5146 U64_HI(bp->spq_mapping));
5147
34f80b04 5148 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5149 bp->spq_prod_idx);
5150}
5151
5152static void bnx2x_init_context(struct bnx2x *bp)
5153{
5154 int i;
5155
54b9ddaa
VZ
5156 /* Rx */
5157 for_each_queue(bp, i) {
a2fbb9ea
ET
5158 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5159 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5160 u8 cl_id = fp->cl_id;
a2fbb9ea 5161
34f80b04
EG
5162 context->ustorm_st_context.common.sb_index_numbers =
5163 BNX2X_RX_SB_INDEX_NUM;
0626b899 5164 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5165 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5166 context->ustorm_st_context.common.flags =
de832a55
EG
5167 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5168 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5169 context->ustorm_st_context.common.statistics_counter_id =
5170 cl_id;
8d9c5f34 5171 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5172 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5173 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5174 bp->rx_buf_size;
34f80b04 5175 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5176 U64_HI(fp->rx_desc_mapping);
34f80b04 5177 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5178 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5179 if (!fp->disable_tpa) {
5180 context->ustorm_st_context.common.flags |=
ca00392c 5181 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5182 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5183 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5184 (u32)0xffff);
7a9b2557
VZ
5185 context->ustorm_st_context.common.sge_page_base_hi =
5186 U64_HI(fp->rx_sge_mapping);
5187 context->ustorm_st_context.common.sge_page_base_lo =
5188 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5189
5190 context->ustorm_st_context.common.max_sges_for_packet =
5191 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5192 context->ustorm_st_context.common.max_sges_for_packet =
5193 ((context->ustorm_st_context.common.
5194 max_sges_for_packet + PAGES_PER_SGE - 1) &
5195 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5196 }
5197
8d9c5f34
EG
5198 context->ustorm_ag_context.cdu_usage =
5199 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5200 CDU_REGION_NUMBER_UCM_AG,
5201 ETH_CONNECTION_TYPE);
5202
ca00392c
EG
5203 context->xstorm_ag_context.cdu_reserved =
5204 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5205 CDU_REGION_NUMBER_XCM_AG,
5206 ETH_CONNECTION_TYPE);
5207 }
5208
54b9ddaa
VZ
5209 /* Tx */
5210 for_each_queue(bp, i) {
ca00392c
EG
5211 struct bnx2x_fastpath *fp = &bp->fp[i];
5212 struct eth_context *context =
54b9ddaa 5213 bnx2x_sp(bp, context[i].eth);
ca00392c
EG
5214
5215 context->cstorm_st_context.sb_index_number =
5216 C_SB_ETH_TX_CQ_INDEX;
5217 context->cstorm_st_context.status_block_id = fp->sb_id;
5218
8d9c5f34
EG
5219 context->xstorm_st_context.tx_bd_page_base_hi =
5220 U64_HI(fp->tx_desc_mapping);
5221 context->xstorm_st_context.tx_bd_page_base_lo =
5222 U64_LO(fp->tx_desc_mapping);
ca00392c 5223 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5224 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5225 }
5226}
5227
5228static void bnx2x_init_ind_table(struct bnx2x *bp)
5229{
26c8fa4d 5230 int func = BP_FUNC(bp);
a2fbb9ea
ET
5231 int i;
5232
555f6c78 5233 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5234 return;
5235
555f6c78
EG
5236 DP(NETIF_MSG_IFUP,
5237 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5238 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5239 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5240 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
54b9ddaa 5241 bp->fp->cl_id + (i % bp->num_queues));
a2fbb9ea
ET
5242}
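/* Illustration of the fill above: with bp->num_queues == 4 and a
 * leading cl_id of 0, the table entries become 0,1,2,3,0,1,2,3,...
 * for all TSTORM_INDIRECTION_TABLE_SIZE slots, i.e. the RSS hash
 * buckets are spread round-robin across the active queues.
 */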
5243
49d66772
ET
5244static void bnx2x_set_client_config(struct bnx2x *bp)
5245{
49d66772 5246 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5247 int port = BP_PORT(bp);
5248 int i;
49d66772 5249
e7799c5f 5250 tstorm_client.mtu = bp->dev->mtu;
49d66772 5251 tstorm_client.config_flags =
de832a55
EG
5252 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5253 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5254#ifdef BCM_VLAN
0c6671b0 5255 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5256 tstorm_client.config_flags |=
8d9c5f34 5257 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5258 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5259 }
5260#endif
49d66772
ET
5261
5262 for_each_queue(bp, i) {
de832a55
EG
5263 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5264
49d66772 5265 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5266 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5267 ((u32 *)&tstorm_client)[0]);
5268 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5269 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5270 ((u32 *)&tstorm_client)[1]);
5271 }
5272
34f80b04
EG
5273 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5274 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5275}
5276
a2fbb9ea
ET
5277static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5278{
a2fbb9ea 5279 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04 5280 int mode = bp->rx_mode;
37b091ba 5281 int mask = bp->rx_mode_cl_mask;
34f80b04 5282 int func = BP_FUNC(bp);
581ce43d 5283 int port = BP_PORT(bp);
a2fbb9ea 5284 int i;
581ce43d
EG
5285 /* All but management unicast packets should pass to the host as well */
5286 u32 llh_mask =
5287 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5288 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5289 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5290 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5291
3196a88a 5292 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5293
5294 switch (mode) {
5295 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5296 tstorm_mac_filter.ucast_drop_all = mask;
5297 tstorm_mac_filter.mcast_drop_all = mask;
5298 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5299 break;
356e2385 5300
a2fbb9ea 5301 case BNX2X_RX_MODE_NORMAL:
34f80b04 5302 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5303 break;
356e2385 5304
a2fbb9ea 5305 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5306 tstorm_mac_filter.mcast_accept_all = mask;
5307 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5308 break;
356e2385 5309
a2fbb9ea 5310 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5311 tstorm_mac_filter.ucast_accept_all = mask;
5312 tstorm_mac_filter.mcast_accept_all = mask;
5313 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5314 /* pass management unicast packets as well */
5315 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5316 break;
356e2385 5317
a2fbb9ea 5318 default:
34f80b04
EG
5319 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5320 break;
a2fbb9ea
ET
5321 }
5322
581ce43d
EG
5323 REG_WR(bp,
5324 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5325 llh_mask);
5326
a2fbb9ea
ET
5327 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5328 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5329 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5330 ((u32 *)&tstorm_mac_filter)[i]);
5331
34f80b04 5332/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5333 ((u32 *)&tstorm_mac_filter)[i]); */
5334 }
a2fbb9ea 5335
49d66772
ET
5336 if (mode != BNX2X_RX_MODE_NONE)
5337 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5338}
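/* Example of the mask semantics above (a reading of the code, not
 * from the original sources): mask is bp->rx_mode_cl_mask, a one-hot
 * bit per client (1 << BP_L_ID(bp)), so e.g. BNX2X_RX_MODE_ALLMULTI
 * sets mcast_accept_all/bcast_accept_all only for this function's
 * client while the other functions' filter bits are left untouched.
 */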
5339
471de716
EG
5340static void bnx2x_init_internal_common(struct bnx2x *bp)
5341{
5342 int i;
5343
5344 /* Zero this manually as its initialization is
5345 currently missing in the initTool */
5346 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5347 REG_WR(bp, BAR_USTRORM_INTMEM +
5348 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5349}
5350
5351static void bnx2x_init_internal_port(struct bnx2x *bp)
5352{
5353 int port = BP_PORT(bp);
5354
ca00392c
EG
5355 REG_WR(bp,
5356 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5357 REG_WR(bp,
5358 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5359 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5360 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5361}
5362
5363static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5364{
a2fbb9ea
ET
5365 struct tstorm_eth_function_common_config tstorm_config = {0};
5366 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5367 int port = BP_PORT(bp);
5368 int func = BP_FUNC(bp);
de832a55
EG
5369 int i, j;
5370 u32 offset;
471de716 5371 u16 max_agg_size;
a2fbb9ea
ET
5372
5373 if (is_multi(bp)) {
555f6c78 5374 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5375 tstorm_config.rss_result_mask = MULTI_MASK;
5376 }
ca00392c
EG
5377
5378 /* Enable TPA if needed */
5379 if (bp->flags & TPA_ENABLE_FLAG)
5380 tstorm_config.config_flags |=
5381 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5382
8d9c5f34
EG
5383 if (IS_E1HMF(bp))
5384 tstorm_config.config_flags |=
5385 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5386
34f80b04
EG
5387 tstorm_config.leading_client_id = BP_L_ID(bp);
5388
a2fbb9ea 5389 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5390 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5391 (*(u32 *)&tstorm_config));
5392
c14423fe 5393 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
37b091ba 5394 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
a2fbb9ea
ET
5395 bnx2x_set_storm_rx_mode(bp);
5396
de832a55
EG
5397 for_each_queue(bp, i) {
5398 u8 cl_id = bp->fp[i].cl_id;
5399
5400 /* reset xstorm per client statistics */
5401 offset = BAR_XSTRORM_INTMEM +
5402 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5403 for (j = 0;
5404 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5405 REG_WR(bp, offset + j*4, 0);
5406
5407 /* reset tstorm per client statistics */
5408 offset = BAR_TSTRORM_INTMEM +
5409 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5410 for (j = 0;
5411 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5412 REG_WR(bp, offset + j*4, 0);
5413
5414 /* reset ustorm per client statistics */
5415 offset = BAR_USTRORM_INTMEM +
5416 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5417 for (j = 0;
5418 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5419 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5420 }
5421
5422 /* Init statistics related context */
34f80b04 5423 stats_flags.collect_eth = 1;
a2fbb9ea 5424
66e855f3 5425 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5426 ((u32 *)&stats_flags)[0]);
66e855f3 5427 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5428 ((u32 *)&stats_flags)[1]);
5429
66e855f3 5430 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5431 ((u32 *)&stats_flags)[0]);
66e855f3 5432 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5433 ((u32 *)&stats_flags)[1]);
5434
de832a55
EG
5435 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5436 ((u32 *)&stats_flags)[0]);
5437 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5438 ((u32 *)&stats_flags)[1]);
5439
66e855f3 5440 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5441 ((u32 *)&stats_flags)[0]);
66e855f3 5442 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5443 ((u32 *)&stats_flags)[1]);
5444
66e855f3
YG
5445 REG_WR(bp, BAR_XSTRORM_INTMEM +
5446 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5447 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5448 REG_WR(bp, BAR_XSTRORM_INTMEM +
5449 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5450 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5451
5452 REG_WR(bp, BAR_TSTRORM_INTMEM +
5453 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5454 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5455 REG_WR(bp, BAR_TSTRORM_INTMEM +
5456 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5457 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5458
de832a55
EG
5459 REG_WR(bp, BAR_USTRORM_INTMEM +
5460 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5461 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5462 REG_WR(bp, BAR_USTRORM_INTMEM +
5463 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5464 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5465
34f80b04
EG
5466 if (CHIP_IS_E1H(bp)) {
5467 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5468 IS_E1HMF(bp));
5469 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5470 IS_E1HMF(bp));
5471 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5472 IS_E1HMF(bp));
5473 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5474 IS_E1HMF(bp));
5475
7a9b2557
VZ
5476 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5477 bp->e1hov);
34f80b04
EG
5478 }
5479
4f40f2cb
EG
 5480 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5481 max_agg_size =
5482 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5483 SGE_PAGE_SIZE * PAGES_PER_SGE),
5484 (u32)0xffff);
54b9ddaa 5485 for_each_queue(bp, i) {
7a9b2557 5486 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5487
5488 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5489 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5490 U64_LO(fp->rx_comp_mapping));
5491 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5492 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5493 U64_HI(fp->rx_comp_mapping));
5494
ca00392c
EG
5495 /* Next page */
5496 REG_WR(bp, BAR_USTRORM_INTMEM +
5497 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5498 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5499 REG_WR(bp, BAR_USTRORM_INTMEM +
5500 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5501 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5502
7a9b2557 5503 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5504 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5505 max_agg_size);
5506 }
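/* Worked example for max_agg_size (illustrative page geometry): with
 * 4 KiB SGE pages, PAGES_PER_SGE == 2 and MAX_SKB_FRAGS >= 8, the
 * product is min(8, MAX_SKB_FRAGS) * 4096 * 2 = 65536, which the
 * outer min() clamps to the 16-bit limit 0xffff.
 */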
8a1c38d1 5507
1c06328c
EG
5508 /* dropless flow control */
5509 if (CHIP_IS_E1H(bp)) {
5510 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5511
5512 rx_pause.bd_thr_low = 250;
5513 rx_pause.cqe_thr_low = 250;
5514 rx_pause.cos = 1;
5515 rx_pause.sge_thr_low = 0;
5516 rx_pause.bd_thr_high = 350;
5517 rx_pause.cqe_thr_high = 350;
5518 rx_pause.sge_thr_high = 0;
5519
54b9ddaa 5520 for_each_queue(bp, i) {
1c06328c
EG
5521 struct bnx2x_fastpath *fp = &bp->fp[i];
5522
5523 if (!fp->disable_tpa) {
5524 rx_pause.sge_thr_low = 150;
5525 rx_pause.sge_thr_high = 250;
5526 }
5527
5528
5529 offset = BAR_USTRORM_INTMEM +
5530 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5531 fp->cl_id);
5532 for (j = 0;
5533 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5534 j++)
5535 REG_WR(bp, offset + j*4,
5536 ((u32 *)&rx_pause)[j]);
5537 }
5538 }
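/* The thresholds above are in ring-entry units; by the look of the
 * names, pause is asserted towards the MAC when free BDs/CQEs drop
 * below *_thr_low (250) and released once they recover past
 * *_thr_high (350).  The SGE thresholds only matter when TPA is in
 * use, which is why they stay 0 for queues with TPA disabled.
 */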
5539
8a1c38d1
EG
5540 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5541
5542 /* Init rate shaping and fairness contexts */
5543 if (IS_E1HMF(bp)) {
5544 int vn;
5545
 5546 /* During init there is no active link;
 5547 until link is up, set the link rate to 10Gbps */
5548 bp->link_vars.line_speed = SPEED_10000;
5549 bnx2x_init_port_minmax(bp);
5550
b015e3d1
EG
5551 if (!BP_NOMCP(bp))
5552 bp->mf_config =
5553 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8a1c38d1
EG
5554 bnx2x_calc_vn_weight_sum(bp);
5555
5556 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5557 bnx2x_init_vn_minmax(bp, 2*vn + port);
5558
5559 /* Enable rate shaping and fairness */
b015e3d1 5560 bp->cmng.flags.cmng_enables |=
8a1c38d1 5561 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b015e3d1 5562
8a1c38d1
EG
5563 } else {
5564 /* rate shaping and fairness are disabled */
5565 DP(NETIF_MSG_IFUP,
5566 "single function mode minmax will be disabled\n");
5567 }
5568
5569
5570 /* Store it to internal memory */
5571 if (bp->port.pmf)
5572 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5573 REG_WR(bp, BAR_XSTRORM_INTMEM +
5574 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5575 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5576}
5577
471de716
EG
5578static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5579{
5580 switch (load_code) {
5581 case FW_MSG_CODE_DRV_LOAD_COMMON:
5582 bnx2x_init_internal_common(bp);
5583 /* no break */
5584
5585 case FW_MSG_CODE_DRV_LOAD_PORT:
5586 bnx2x_init_internal_port(bp);
5587 /* no break */
5588
5589 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5590 bnx2x_init_internal_func(bp);
5591 break;
5592
5593 default:
5594 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5595 break;
5596 }
5597}
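/* Note the deliberate fall-through above: a COMMON load runs the
 * common, port and function stages, a PORT load runs port + function,
 * and a FUNCTION load runs the function stage only -- so only the
 * first driver instance on the chip pays the full init cost.
 */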
5598
5599static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5600{
5601 int i;
5602
5603 for_each_queue(bp, i) {
5604 struct bnx2x_fastpath *fp = &bp->fp[i];
5605
34f80b04 5606 fp->bp = bp;
a2fbb9ea 5607 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5608 fp->index = i;
34f80b04 5609 fp->cl_id = BP_L_ID(bp) + i;
37b091ba
MC
5610#ifdef BCM_CNIC
5611 fp->sb_id = fp->cl_id + 1;
5612#else
34f80b04 5613 fp->sb_id = fp->cl_id;
37b091ba 5614#endif
34f80b04 5615 DP(NETIF_MSG_IFUP,
f5372251
EG
5616 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5617 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5618 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5619 fp->sb_id);
5c862848 5620 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5621 }
5622
16119785
EG
5623 /* ensure status block indices were read */
5624 rmb();
5625
5626
5c862848
EG
5627 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5628 DEF_SB_ID);
5629 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5630 bnx2x_update_coalesce(bp);
5631 bnx2x_init_rx_rings(bp);
5632 bnx2x_init_tx_ring(bp);
5633 bnx2x_init_sp_ring(bp);
5634 bnx2x_init_context(bp);
471de716 5635 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5636 bnx2x_init_ind_table(bp);
0ef00459
EG
5637 bnx2x_stats_init(bp);
5638
5639 /* At this point, we are ready for interrupts */
5640 atomic_set(&bp->intr_sem, 0);
5641
5642 /* flush all before enabling interrupts */
5643 mb();
5644 mmiowb();
5645
615f8fd9 5646 bnx2x_int_enable(bp);
eb8da205
EG
5647
5648 /* Check for SPIO5 */
5649 bnx2x_attn_int_deasserted0(bp,
5650 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5651 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5652}
5653
5654/* end of nic init */
5655
5656/*
5657 * gzip service functions
5658 */
5659
5660static int bnx2x_gunzip_init(struct bnx2x *bp)
5661{
1a983142
FT
5662 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5663 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
5664 if (bp->gunzip_buf == NULL)
5665 goto gunzip_nomem1;
5666
5667 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5668 if (bp->strm == NULL)
5669 goto gunzip_nomem2;
5670
5671 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5672 GFP_KERNEL);
5673 if (bp->strm->workspace == NULL)
5674 goto gunzip_nomem3;
5675
5676 return 0;
5677
5678gunzip_nomem3:
5679 kfree(bp->strm);
5680 bp->strm = NULL;
5681
5682gunzip_nomem2:
1a983142
FT
5683 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5684 bp->gunzip_mapping);
a2fbb9ea
ET
5685 bp->gunzip_buf = NULL;
5686
5687gunzip_nomem1:
7995c64e 5688 netdev_err(bp->dev, "Cannot allocate firmware buffer for decompression\n");
a2fbb9ea
ET
5689 return -ENOMEM;
5690}
5691
5692static void bnx2x_gunzip_end(struct bnx2x *bp)
5693{
5694 kfree(bp->strm->workspace);
5695
5696 kfree(bp->strm);
5697 bp->strm = NULL;
5698
5699 if (bp->gunzip_buf) {
1a983142
FT
5700 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5701 bp->gunzip_mapping);
a2fbb9ea
ET
5702 bp->gunzip_buf = NULL;
5703 }
5704}
5705
94a78b79 5706static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5707{
5708 int n, rc;
5709
5710 /* check gzip header */
94a78b79
VZ
5711 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5712 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5713 return -EINVAL;
94a78b79 5714 }
a2fbb9ea
ET
5715
5716 n = 10;
5717
34f80b04 5718#define FNAME 0x8
a2fbb9ea
ET
5719
5720 if (zbuf[3] & FNAME)
5721 while ((zbuf[n++] != 0) && (n < len));
5722
94a78b79 5723 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5724 bp->strm->avail_in = len - n;
5725 bp->strm->next_out = bp->gunzip_buf;
5726 bp->strm->avail_out = FW_BUF_SIZE;
5727
5728 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5729 if (rc != Z_OK)
5730 return rc;
5731
5732 rc = zlib_inflate(bp->strm, Z_FINISH);
5733 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
5734 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5735 bp->strm->msg);
a2fbb9ea
ET
5736
5737 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5738 if (bp->gunzip_outlen & 0x3)
7995c64e
JP
5739 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5740 bp->gunzip_outlen);
a2fbb9ea
ET
5741 bp->gunzip_outlen >>= 2;
5742
5743 zlib_inflateEnd(bp->strm);
5744
5745 if (rc == Z_STREAM_END)
5746 return 0;
5747
5748 return rc;
5749}
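/* For reference, the gzip member layout (RFC 1952) that the header
 * handling above relies on: bytes 0-1 are the magic 0x1f 0x8b, byte 2
 * is the compression method (8 == deflate, Z_DEFLATED), byte 3 holds
 * the flag bits, and bytes 4-9 complete the fixed 10-byte header --
 * hence n = 10.  If FNAME (bit 3) is set, a NUL-terminated file name
 * follows and is skipped by the while loop before the raw deflate
 * stream (inflated with -MAX_WBITS, i.e. no zlib wrapper) begins.
 */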
5750
5751/* nic load/unload */
5752
5753/*
34f80b04 5754 * General service functions
a2fbb9ea
ET
5755 */
5756
5757/* send a NIG loopback debug packet */
5758static void bnx2x_lb_pckt(struct bnx2x *bp)
5759{
a2fbb9ea 5760 u32 wb_write[3];
a2fbb9ea
ET
5761
5762 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5763 wb_write[0] = 0x55555555;
5764 wb_write[1] = 0x55555555;
34f80b04 5765 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5766 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5767
5768 /* NON-IP protocol */
a2fbb9ea
ET
5769 wb_write[0] = 0x09000000;
5770 wb_write[1] = 0x55555555;
34f80b04 5771 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5772 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5773}
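/* Each REG_WR_DMAE above pushes two 32-bit data words (8 bytes of
 * frame) plus a control word (0x20 = SOP, 0x10 = EOP), so the debug
 * frame shows up as 16 (0x10) bytes in total -- which appears to be
 * exactly the per-packet byte count that bnx2x_int_mem_test() polls
 * for in NIG_REG_STAT2_BRB_OCTET below.
 */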
5774
5775/* some of the internal memories
 5776 * are not directly readable from the driver;
 5777 * to test them we send debug packets
 5778 */
5779static int bnx2x_int_mem_test(struct bnx2x *bp)
5780{
5781 int factor;
5782 int count, i;
5783 u32 val = 0;
5784
ad8d3948 5785 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5786 factor = 120;
ad8d3948
EG
5787 else if (CHIP_REV_IS_EMUL(bp))
5788 factor = 200;
5789 else
a2fbb9ea 5790 factor = 1;
a2fbb9ea
ET
5791
5792 DP(NETIF_MSG_HW, "start part1\n");
5793
5794 /* Disable inputs of parser neighbor blocks */
5795 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5798 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5799
5800 /* Write 0 to parser credits for CFC search request */
5801 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5802
5803 /* send Ethernet packet */
5804 bnx2x_lb_pckt(bp);
5805
 5806 /* TODO: should the NIG statistics be reset here? */
5807 /* Wait until NIG register shows 1 packet of size 0x10 */
5808 count = 1000 * factor;
5809 while (count) {
34f80b04 5810
a2fbb9ea
ET
5811 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5812 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5813 if (val == 0x10)
5814 break;
5815
5816 msleep(10);
5817 count--;
5818 }
5819 if (val != 0x10) {
5820 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5821 return -1;
5822 }
5823
5824 /* Wait until PRS register shows 1 packet */
5825 count = 1000 * factor;
5826 while (count) {
5827 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5828 if (val == 1)
5829 break;
5830
5831 msleep(10);
5832 count--;
5833 }
5834 if (val != 0x1) {
5835 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5836 return -2;
5837 }
5838
5839 /* Reset and init BRB, PRS */
34f80b04 5840 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5841 msleep(50);
34f80b04 5842 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5843 msleep(50);
94a78b79
VZ
5844 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5845 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5846
5847 DP(NETIF_MSG_HW, "part2\n");
5848
5849 /* Disable inputs of parser neighbor blocks */
5850 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5851 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5852 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5853 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5854
5855 /* Write 0 to parser credits for CFC search request */
5856 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5857
5858 /* send 10 Ethernet packets */
5859 for (i = 0; i < 10; i++)
5860 bnx2x_lb_pckt(bp);
5861
5862 /* Wait until NIG register shows 10 + 1
5863 packets of size 11*0x10 = 0xb0 */
5864 count = 1000 * factor;
5865 while (count) {
34f80b04 5866
a2fbb9ea
ET
5867 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5868 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5869 if (val == 0xb0)
5870 break;
5871
5872 msleep(10);
5873 count--;
5874 }
5875 if (val != 0xb0) {
5876 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5877 return -3;
5878 }
5879
5880 /* Wait until PRS register shows 2 packets */
5881 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5882 if (val != 2)
5883 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5884
5885 /* Write 1 to parser credits for CFC search request */
5886 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5887
5888 /* Wait until PRS register shows 3 packets */
5889 msleep(10 * factor);
 5890 /* the PRS packet counter should have reached 3 by now */
5891 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5892 if (val != 3)
5893 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5894
5895 /* clear NIG EOP FIFO */
5896 for (i = 0; i < 11; i++)
5897 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5898 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5899 if (val != 1) {
5900 BNX2X_ERR("clear of NIG failed\n");
5901 return -4;
5902 }
5903
5904 /* Reset and init BRB, PRS, NIG */
5905 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5906 msleep(50);
5907 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5908 msleep(50);
94a78b79
VZ
5909 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5910 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 5911#ifndef BCM_CNIC
a2fbb9ea
ET
5912 /* set NIC mode */
5913 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5914#endif
5915
5916 /* Enable inputs of parser neighbor blocks */
5917 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5918 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5919 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5920 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5921
5922 DP(NETIF_MSG_HW, "done\n");
5923
5924 return 0; /* OK */
5925}
5926
5927static void enable_blocks_attention(struct bnx2x *bp)
5928{
5929 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5930 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5931 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5932 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5933 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5934 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5935 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5936 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5937 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5938/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5939/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5940 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5941 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5942 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5943/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5944/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5945 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5946 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5947 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5948 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5949/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5950/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5951 if (CHIP_REV_IS_FPGA(bp))
5952 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5953 else
5954 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5955 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5956 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5957 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5958/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5959/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5960 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5961 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5962/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5963 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5964}
5965
34f80b04 5966
81f75bbf
EG
5967static void bnx2x_reset_common(struct bnx2x *bp)
5968{
5969 /* reset_common */
5970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5971 0xd3ffff7f);
5972 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5973}
5974
573f2035
EG
5975static void bnx2x_init_pxp(struct bnx2x *bp)
5976{
5977 u16 devctl;
5978 int r_order, w_order;
5979
5980 pci_read_config_word(bp->pdev,
5981 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5982 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5983 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5984 if (bp->mrrs == -1)
5985 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5986 else {
5987 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5988 r_order = bp->mrrs;
5989 }
5990
5991 bnx2x_init_pxp_arb(bp, r_order, w_order);
5992}
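/* Decoding example for the PCIe DEVCTL parsing above: payload size
 * lives in bits 7:5 and read-request size in bits 14:12, each encoded
 * as order n meaning 128 << n bytes.  E.g. devctl = 0x2810 gives
 * w_order = 0 (128-byte max payload) and r_order = 2 (512-byte max
 * read request), which are then fed to bnx2x_init_pxp_arb().
 */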
fd4ef40d
EG
5993
5994static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5995{
5996 u32 val;
5997 u8 port;
5998 u8 is_required = 0;
5999
6000 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6001 SHARED_HW_CFG_FAN_FAILURE_MASK;
6002
6003 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6004 is_required = 1;
6005
6006 /*
6007 * The fan failure mechanism is usually related to the PHY type since
6008 * the power consumption of the board is affected by the PHY. Currently,
 6009 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6010 */
6011 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6012 for (port = PORT_0; port < PORT_MAX; port++) {
6013 u32 phy_type =
6014 SHMEM_RD(bp, dev_info.port_hw_config[port].
6015 external_phy_config) &
6016 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6017 is_required |=
6018 ((phy_type ==
6019 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
6020 (phy_type ==
6021 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
6022 (phy_type ==
6023 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6024 }
6025
6026 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6027
6028 if (is_required == 0)
6029 return;
6030
6031 /* Fan failure is indicated by SPIO 5 */
6032 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6033 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6034
6035 /* set to active low mode */
6036 val = REG_RD(bp, MISC_REG_SPIO_INT);
6037 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6038 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6039 REG_WR(bp, MISC_REG_SPIO_INT, val);
6040
6041 /* enable interrupt to signal the IGU */
6042 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6043 val |= (1 << MISC_REGISTERS_SPIO_5);
6044 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6045}
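/* Reading of the SPIO5 setup above: the pin is left Hi-Z so the fan
 * monitor can drive it, the "old value" latch is primed high, and the
 * event enable is set -- so a falling edge on SPIO5 (fan failure is
 * active-low) raises the SPIO5 attention that bnx2x_nic_init() later
 * checks via AEU_INPUTS_ATTN_BITS_SPIO5.
 */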
6046
34f80b04 6047static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6048{
a2fbb9ea 6049 u32 val, i;
37b091ba
MC
6050#ifdef BCM_CNIC
6051 u32 wb_write[2];
6052#endif
a2fbb9ea 6053
34f80b04 6054 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6055
81f75bbf 6056 bnx2x_reset_common(bp);
34f80b04
EG
6057 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6058 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6059
94a78b79 6060 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6061 if (CHIP_IS_E1H(bp))
6062 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6063
34f80b04
EG
6064 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6065 msleep(30);
6066 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6067
94a78b79 6068 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6069 if (CHIP_IS_E1(bp)) {
6070 /* enable HW interrupt from PXP on USDM overflow
6071 bit 16 on INT_MASK_0 */
6072 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6073 }
a2fbb9ea 6074
94a78b79 6075 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6076 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6077
6078#ifdef __BIG_ENDIAN
34f80b04
EG
6079 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6080 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6081 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6082 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6083 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6084 /* make sure this value is 0 */
6085 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6086
6087/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6088 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6089 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6090 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6091 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6092#endif
6093
34f80b04 6094 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6095#ifdef BCM_CNIC
34f80b04
EG
6096 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6097 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6098 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6099#endif
6100
34f80b04
EG
6101 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6102 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6103
34f80b04
EG
 6104 /* let the HW do its magic ... */
6105 msleep(100);
6106 /* finish PXP init */
6107 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6108 if (val != 1) {
6109 BNX2X_ERR("PXP2 CFG failed\n");
6110 return -EBUSY;
6111 }
6112 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6113 if (val != 1) {
6114 BNX2X_ERR("PXP2 RD_INIT failed\n");
6115 return -EBUSY;
6116 }
a2fbb9ea 6117
34f80b04
EG
6118 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6119 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6120
94a78b79 6121 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6122
34f80b04
EG
6123 /* clean the DMAE memory */
6124 bp->dmae_ready = 1;
6125 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6126
94a78b79
VZ
6127 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6128 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6129 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6130 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6131
34f80b04
EG
6132 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6133 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6134 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6135 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6136
94a78b79 6137 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6138
6139#ifdef BCM_CNIC
6140 wb_write[0] = 0;
6141 wb_write[1] = 0;
6142 for (i = 0; i < 64; i++) {
6143 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6144 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6145
6146 if (CHIP_IS_E1H(bp)) {
6147 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6148 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6149 wb_write, 2);
6150 }
6151 }
6152#endif
34f80b04
EG
6153 /* soft reset pulse */
6154 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6155 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6156
37b091ba 6157#ifdef BCM_CNIC
94a78b79 6158 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6159#endif
a2fbb9ea 6160
94a78b79 6161 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6162 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6163 if (!CHIP_REV_IS_SLOW(bp)) {
6164 /* enable hw interrupt from doorbell Q */
6165 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6166 }
a2fbb9ea 6167
94a78b79
VZ
6168 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6169 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6170 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6171#ifndef BCM_CNIC
3196a88a
EG
6172 /* set NIC mode */
6173 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6174#endif
34f80b04
EG
6175 if (CHIP_IS_E1H(bp))
6176 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6177
94a78b79
VZ
6178 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6179 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6180 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6181 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6182
ca00392c
EG
6183 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6184 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6185 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6186 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6187
94a78b79
VZ
6188 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6189 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6190 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6191 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6192
34f80b04
EG
6193 /* sync semi rtc */
6194 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6195 0x80000000);
6196 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6197 0x80000000);
a2fbb9ea 6198
94a78b79
VZ
6199 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6200 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6201 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6202
34f80b04
EG
6203 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6204 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6205 REG_WR(bp, i, 0xc0cac01a);
6206 /* TODO: replace with something meaningful */
6207 }
94a78b79 6208 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6209#ifdef BCM_CNIC
6210 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6211 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6212 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6213 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6214 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6215 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6216 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6217 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6218 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6219 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6220#endif
34f80b04 6221 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6222
34f80b04
EG
6223 if (sizeof(union cdu_context) != 1024)
6224 /* we currently assume that a context is 1024 bytes */
7995c64e
JP
6225 pr_alert("please adjust the size of cdu_context(%ld)\n",
6226 (long)sizeof(union cdu_context));
a2fbb9ea 6227
94a78b79 6228 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6229 val = (4 << 24) + (0 << 12) + 1024;
6230 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6231
94a78b79 6232 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6233 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6234 /* enable context validation interrupt from CFC */
6235 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6236
6237 /* set the thresholds to prevent CFC/CDU race */
6238 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6239
94a78b79
VZ
6240 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6241 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6242
94a78b79 6243 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6244 /* Reset PCIE errors for debug */
6245 REG_WR(bp, 0x2814, 0xffffffff);
6246 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6247
94a78b79 6248 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6249 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6250 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6251 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6252
94a78b79 6253 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6254 if (CHIP_IS_E1H(bp)) {
6255 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6256 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6257 }
6258
6259 if (CHIP_REV_IS_SLOW(bp))
6260 msleep(200);
6261
6262 /* finish CFC init */
6263 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6264 if (val != 1) {
6265 BNX2X_ERR("CFC LL_INIT failed\n");
6266 return -EBUSY;
6267 }
6268 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6269 if (val != 1) {
6270 BNX2X_ERR("CFC AC_INIT failed\n");
6271 return -EBUSY;
6272 }
6273 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6274 if (val != 1) {
6275 BNX2X_ERR("CFC CAM_INIT failed\n");
6276 return -EBUSY;
6277 }
6278 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6279
34f80b04
EG
6280 /* read NIG statistic
6281 to see if this is our first up since powerup */
6282 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6283 val = *bnx2x_sp(bp, wb_data[0]);
6284
6285 /* do internal memory self test */
6286 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6287 BNX2X_ERR("internal mem self test failed\n");
6288 return -EBUSY;
6289 }
6290
35b19ba5 6291 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6292 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6294 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6295 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6296 bp->port.need_hw_lock = 1;
6297 break;
6298
34f80b04
EG
6299 default:
6300 break;
6301 }
f1410647 6302
fd4ef40d
EG
6303 bnx2x_setup_fan_failure_detection(bp);
6304
34f80b04
EG
6305 /* clear PXP2 attentions */
6306 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6307
34f80b04 6308 enable_blocks_attention(bp);
a2fbb9ea 6309
6bbca910
YR
6310 if (!BP_NOMCP(bp)) {
6311 bnx2x_acquire_phy_lock(bp);
6312 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6313 bnx2x_release_phy_lock(bp);
6314 } else
6315 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6316
34f80b04
EG
6317 return 0;
6318}
a2fbb9ea 6319
34f80b04
EG
6320static int bnx2x_init_port(struct bnx2x *bp)
6321{
6322 int port = BP_PORT(bp);
94a78b79 6323 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6324 u32 low, high;
34f80b04 6325 u32 val;
a2fbb9ea 6326
34f80b04
EG
6327 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6328
6329 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6330
94a78b79 6331 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6332 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6333
6334 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6335 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6336 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
94a78b79 6337 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6338
37b091ba
MC
6339#ifdef BCM_CNIC
6340 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
a2fbb9ea 6341
94a78b79 6342 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
37b091ba
MC
6343 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6344 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
a2fbb9ea 6345#endif
94a78b79 6346 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6347
94a78b79 6348 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6349 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6350 /* no pause for emulation and FPGA */
6351 low = 0;
6352 high = 513;
6353 } else {
6354 if (IS_E1HMF(bp))
6355 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6356 else if (bp->dev->mtu > 4096) {
6357 if (bp->flags & ONE_PORT_FLAG)
6358 low = 160;
6359 else {
6360 val = bp->dev->mtu;
6361 /* (24*1024 + val*4)/256 */
6362 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6363 }
6364 } else
6365 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6366 high = low + 56; /* 14*1024/256 */
6367 }
6368 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6369 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6370
6371
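/* Worked example for the pause thresholds above: with an MTU of 9000
 * on a dual-port E1H in SF mode, low = 96 + 9000/64 + 1 = 237 BRB
 * blocks of 256 bytes (matching the (24*1024 + val*4)/256 formula in
 * the comment: 60576/256 rounds up to 237) and high = 237 + 56 = 293,
 * the extra 56 blocks being the 14*1024/256 headroom noted above.
 */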
94a78b79 6372 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6373
94a78b79 6374 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6375 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6376 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6377 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6378
94a78b79
VZ
6379 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6380 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6381 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6382 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6383
94a78b79 6384 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6385 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6386
94a78b79 6387 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6388
6389 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6390 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6391
6392 /* update threshold */
34f80b04 6393 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6394 /* update init credit */
34f80b04 6395 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6396
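/* Arithmetic for the two writes above: 9040/16 = 565, i.e. the
 * arbiter threshold is ~9 KiB expressed in 16-byte units (one jumbo
 * frame at the "mtu 9000" this block is configured for), and the
 * initial credit 565 + 553 - 22 = 1096 apparently adds a fixed
 * allowance for internal buffering on top of that threshold.
 */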
6397 /* probe changes */
34f80b04 6398 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6399 msleep(5);
34f80b04 6400 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 6401
37b091ba
MC
6402#ifdef BCM_CNIC
6403 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
a2fbb9ea 6404#endif
94a78b79 6405 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6406 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6407
6408 if (CHIP_IS_E1(bp)) {
6409 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6410 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6411 }
94a78b79 6412 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6413
94a78b79 6414 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6415 /* init aeu_mask_attn_func_0/1:
6416 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6417 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6418 * bits 4-7 are used for "per vn group attention" */
6419 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6420 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6421
94a78b79 6422 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6423 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6424 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6425 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6426 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6427
94a78b79 6428 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6429
6430 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6431
6432 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6433 /* 0x2 disable e1hov, 0x1 enable */
6434 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6435 (IS_E1HMF(bp) ? 0x1 : 0x2));
6436
1c06328c
EG
6437 {
6438 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6439 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6440 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6441 }
34f80b04
EG
6442 }
6443
94a78b79 6444 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6445 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6446
35b19ba5 6447 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6448 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6449 {
6450 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6451
6452 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6453 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6454
6455 /* The GPIO should be swapped if the swap register is
6456 set and active */
6457 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6458 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6459
6460 /* Select function upon port-swap configuration */
6461 if (port == 0) {
6462 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6463 aeu_gpio_mask = (swap_val && swap_override) ?
6464 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6465 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6466 } else {
6467 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6468 aeu_gpio_mask = (swap_val && swap_override) ?
6469 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6470 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6471 }
6472 val = REG_RD(bp, offset);
6473 /* add GPIO3 to group */
6474 val |= aeu_gpio_mask;
6475 REG_WR(bp, offset, val);
6476 }
6477 break;
6478
35b19ba5 6479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6480 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6481 /* add SPIO 5 to group 0 */
4d295db0
EG
6482 {
6483 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6484 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6485 val = REG_RD(bp, reg_addr);
f1410647 6486 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6487 REG_WR(bp, reg_addr, val);
6488 }
f1410647
ET
6489 break;
6490
6491 default:
6492 break;
6493 }
6494
c18487ee 6495 bnx2x__link_reset(bp);
a2fbb9ea 6496
34f80b04
EG
6497 return 0;
6498}
6499
6500#define ILT_PER_FUNC (768/2)
6501#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6502/* the phys address is shifted right 12 bits and a
 6503 1=valid bit is added at the 53rd bit;
 6504 then, since this is a wide register(TM),
 6505 we split it into two 32 bit writes
 6506 */
6507#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6508#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6509#define PXP_ONE_ILT(x) (((x) << 10) | x)
6510#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6511
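/* Worked example for the macros above (illustrative address): for
 * addr = 0x0000001234567000, ONCHIP_ADDR1(addr) = 0x01234567 (the
 * address shifted right 12 bits) and ONCHIP_ADDR2(addr) = 0x00100000
 * (just the valid bit, since addr >> 44 is 0 here).  Similarly
 * PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, encoding an ILT range
 * whose first and last line are both 5.
 */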
37b091ba
MC
6512#ifdef BCM_CNIC
6513#define CNIC_ILT_LINES 127
6514#define CNIC_CTX_PER_ILT 16
6515#else
34f80b04 6516#define CNIC_ILT_LINES 0
37b091ba 6517#endif
34f80b04
EG
6518
6519static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6520{
6521 int reg;
6522
6523 if (CHIP_IS_E1H(bp))
6524 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6525 else /* E1 */
6526 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6527
6528 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6529}
6530
6531static int bnx2x_init_func(struct bnx2x *bp)
6532{
6533 int port = BP_PORT(bp);
6534 int func = BP_FUNC(bp);
8badd27a 6535 u32 addr, val;
34f80b04
EG
6536 int i;
6537
6538 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6539
8badd27a
EG
6540 /* set MSI reconfigure capability */
6541 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6542 val = REG_RD(bp, addr);
6543 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6544 REG_WR(bp, addr, val);
6545
34f80b04
EG
6546 i = FUNC_ILT_BASE(func);
6547
6548 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6549 if (CHIP_IS_E1H(bp)) {
6550 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6551 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6552 } else /* E1 */
6553 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6554 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6555
#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

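	/* the switch below falls through on purpose: a COMMON load also
	 * performs the PORT and FUNCTION stages, and a PORT load also
	 * performs the FUNCTION stage (see the "no break" markers) */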
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
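/* note: both helpers jump to the alloc_mem_err label on failure, so
 * they are only meaningful inside bnx2x_alloc_mem() itself */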

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate the searcher T2 table;
	   we allocate 1/4 of the alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
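	/* each 64-byte T2 entry stores, in its last 8 bytes, the bus
	 * address of the entry that follows it, chaining the table into
	 * the searcher's free-entry list (the head and tail are written
	 * to SRC_REG_FIRSTFREE0/SRC_REG_LASTFREE0 in bnx2x_init_func()) */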

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

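		/* walk the ring from consumer to producer, releasing any
		 * packets the hardware never completed */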
		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

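	/* MSI-X table layout: entry 0 is the slowpath interrupt, entry 1
	 * is the CNIC interrupt when BCM_CNIC is compiled in, and the
	 * remaining entries map one-to-one to the fastpath queues */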
	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

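	/* intr_sem is nonzero while interrupts are administratively
	 * disabled; atomic_dec_and_test() returns true only when this
	 * decrement brings the count to zero, i.e. the last disabler
	 * has released it */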
	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
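	/* the 6-byte MAC is loaded as three 16-bit words; swab16() puts
	 * each word into the byte order the CAM entry expects */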
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if MSI-X could not be enabled due to
		   lack of memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

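		/* no MCP: emulate its load-type decision with driver-side
		 * counters; load_count[0] counts all functions, while
		 * load_count[1 + port] counts the functions on each port */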
		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queues should only be re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty, %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;
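		/* each EMAC MAC_MATCH entry is 8 bytes wide (two 32-bit
		 * registers), hence the *8 stride here and the entry + 4
		 * write below */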

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
8103 REG_WR(bp, reg, orig_func);
8104 new_val = REG_RD(bp, reg);
8105 if (new_val != orig_func) {
8106 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8107 orig_func, new_val);
8108 BUG();
8109 }
8110}
8111
8112static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8113{
8114 if (CHIP_IS_E1H(bp))
8115 bnx2x_undi_int_disable_e1h(bp, func);
8116 else
8117 bnx2x_int_disable(bp);
8118}
8119
34f80b04
EG
8120static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8121{
8122 u32 val;
8123
8124 /* Check if there is any driver already loaded */
8125 val = REG_RD(bp, MISC_REG_UNPREPARED);
8126 if (val == 0x1) {
8127 /* Check if it is the UNDI driver
8128 * UNDI driver initializes CID offset for normal bell to 0x7
8129 */
4a37fb66 8130 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8131 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8132 if (val == 0x7) {
8133 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8134 /* save our func */
34f80b04 8135 int func = BP_FUNC(bp);
da5a662a
VZ
8136 u32 swap_en;
8137 u32 swap_val;
34f80b04 8138
b4661739
EG
8139 /* clear the UNDI indication */
8140 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8141
34f80b04
EG
8142 BNX2X_DEV_INFO("UNDI is active! reset device\n");
8143
8144 /* try unload UNDI on port 0 */
8145 bp->func = 0;
da5a662a
VZ
8146 bp->fw_seq =
8147 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8148 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 8149 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8150
8151 /* if UNDI is loaded on the other port */
8152 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8153
da5a662a
VZ
8154 /* send "DONE" for previous unload */
8155 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8156
8157 /* unload UNDI on port 1 */
34f80b04 8158 bp->func = 1;
da5a662a
VZ
8159 bp->fw_seq =
8160 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8161 DRV_MSG_SEQ_NUMBER_MASK);
8162 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8163
8164 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
8165 }
8166
b4661739
EG
8167 /* now it's safe to release the lock */
8168 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8169
f1ef27ef 8170 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
8171
8172 /* close input traffic and wait for it */
8173 /* Do not rcv packets to BRB */
8174 REG_WR(bp,
8175 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8176 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8177 /* Do not direct rcv packets that are not for MCP to
8178 * the BRB */
8179 REG_WR(bp,
8180 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8181 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8182 /* clear AEU */
8183 REG_WR(bp,
8184 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8185 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8186 msleep(10);
8187
8188 /* save NIG port swap info */
8189 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8190 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
8191 /* reset device */
8192 REG_WR(bp,
8193 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 8194 0xd3ffffff);
34f80b04
EG
8195 REG_WR(bp,
8196 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8197 0x1403);
da5a662a
VZ
8198 /* take the NIG out of reset and restore swap values */
8199 REG_WR(bp,
8200 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8201 MISC_REGISTERS_RESET_REG_1_RST_NIG);
8202 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8203 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8204
8205 /* send unload done to the MCP */
8206 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8207
8208 /* restore our func and fw_seq */
8209 bp->func = func;
8210 bp->fw_seq =
8211 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8212 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
8213
8214 } else
8215 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
8216 }
8217}
8218
8219static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8220{
8221 u32 val, val2, val3, val4, id;
72ce58c3 8222 u16 pmc;
34f80b04
EG
8223
8224 /* Get the chip revision id and number. */
8225 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8226 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8227 id = ((val & 0xffff) << 16);
8228 val = REG_RD(bp, MISC_REG_CHIP_REV);
8229 id |= ((val & 0xf) << 12);
8230 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8231 id |= ((val & 0xff) << 4);
5a40e08e 8232 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
8233 id |= (val & 0xf);
8234 bp->common.chip_id = id;
8235 bp->link_params.chip_id = bp->common.chip_id;
8236 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8237
1c06328c
EG
8238 val = (REG_RD(bp, 0x2874) & 0x55);
8239 if ((bp->common.chip_id & 0x1) ||
8240 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8241 bp->flags |= ONE_PORT_FLAG;
8242 BNX2X_DEV_INFO("single port device\n");
8243 }
8244
34f80b04
EG
8245 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8246 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8247 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8248 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8249 bp->common.flash_size, bp->common.flash_size);
8250
8251 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8252 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8253 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
8254 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8255 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
8256
8257 if (!bp->common.shmem_base ||
8258 (bp->common.shmem_base < 0xA0000) ||
8259 (bp->common.shmem_base >= 0xC0000)) {
8260 BNX2X_DEV_INFO("MCP not active\n");
8261 bp->flags |= NO_MCP_FLAG;
8262 return;
8263 }
8264
8265 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8266 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8268 BNX2X_ERR("BAD MCP validity signature\n");
8269
8270 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8271 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8272
8273 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8274 SHARED_HW_CFG_LED_MODE_MASK) >>
8275 SHARED_HW_CFG_LED_MODE_SHIFT);
8276
c2c8b03e
EG
8277 bp->link_params.feature_config_flags = 0;
8278 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8279 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8280 bp->link_params.feature_config_flags |=
8281 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8282 else
8283 bp->link_params.feature_config_flags &=
8284 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8285
34f80b04
EG
8286 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8287 bp->common.bc_ver = val;
8288 BNX2X_DEV_INFO("bc_ver %X\n", val);
8289 if (val < BNX2X_BC_VER) {
8290 /* for now only warn
8291 * later we might need to enforce this */
8292 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8293 " please upgrade BC\n", BNX2X_BC_VER, val);
8294 }
4d295db0
EG
8295 bp->link_params.feature_config_flags |=
8296 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8297 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8298
8299 if (BP_E1HVN(bp) == 0) {
8300 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8301 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8302 } else {
8303 /* no WOL capability for E1HVN != 0 */
8304 bp->flags |= NO_WOL_FLAG;
8305 }
8306 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8307 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8308
8309 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8310 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8311 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8312 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8313
7995c64e 8314 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
34f80b04
EG
8315}
8316
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

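/*
 * Editorial note: translate the NVRAM requested-speed setting into
 * link_params.  Either full autoneg is selected (advertising everything
 * the port supports), or one forced speed/duplex is taken, after being
 * validated against the supported mask built above; an invalid NVRAM
 * combination is reported and the request is dropped.
 */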
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

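/*
 * Editorial note: shmem stores a MAC address as two CPU-order words,
 * mac_upper with the two most significant bytes and mac_lower with the
 * remaining four.  Byte-swapping each half to big-endian before the
 * memcpy leaves the 6-byte buffer in wire order.
 */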
e665bfda
MC
8717static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8718{
8719 mac_hi = cpu_to_be16(mac_hi);
8720 mac_lo = cpu_to_be32(mac_lo);
8721 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8722 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8723}
8724
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

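/*
 * Editorial note: on E1H an adapter can apparently be partitioned
 * across several virtual networks (VNs); a function is only usable in
 * multi-function mode when the MCP handed it a valid outer-VLAN
 * (E1HOV) tag, hence the -EPERM paths below when that tag is missing
 * or a non-zero VN shows up in single-function mode.
 */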
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

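/*
 * Editorial note: one-time software init.  Interrupt handling stays
 * masked (intr_sem = 1) until the HW is brought up, and the default
 * coalescing values are rounded down to the 4*BNX2X_BTR tick
 * granularity, e.g. tx_ticks = (50 / (4*BNX2X_BTR)) * (4*BNX2X_BTR).
 */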
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

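/*
 * Editorial note: the setter follows the usual ethtool split.  With
 * autoneg the requested advertisement is first masked by the supported
 * modes; with a forced speed, each speed/duplex pair is checked against
 * the supported mask and rejected with -EINVAL if the port cannot do
 * it.  The link is only re-negotiated when the interface is running.
 */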
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

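/*
 * Editorial note: register dump support.  The reported length walks the
 * per-chip "online" tables (reg_addrs plus the paged wreg_addrs sets),
 * while the dump itself is prefixed with a dump_hdr carrying the storm
 * WAITP scratchpad addresses and the chip flavour; as written, only the
 * plain reg_addrs entries are actually read out below.
 */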
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

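/*
 * Editorial note: NVRAM access protocol.  Each port first wins the SW
 * arbitration bit in MCP_REG_MCPR_NVM_SW_ARB (set the per-port request
 * bit, then poll for the grant, with the timeout stretched 100x on
 * emulation/FPGA) and releases it the same way when done; the access
 * enable bits are then raised for the duration of the transfer.
 */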
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

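/*
 * Editorial note: dword reads go through the MCP command interface:
 * clear DONE, program the address, issue DOIT and poll for DONE.  Data
 * comes back in CPU order while ethtool expects a byte stream, hence
 * the cpu_to_be32() on the result.
 */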
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

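/*
 * Editorial note: bulk writes must respect the flash page size.  FIRST
 * is raised at the start of the buffer and whenever a write begins on a
 * page boundary; LAST on the final dword and whenever the next dword
 * would cross into a new NVRAM_PAGE_SIZE page.  Single-byte requests
 * take the read-modify-write path above instead.
 */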
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

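/*
 * Editorial note: set_eeprom doubles as the PHY firmware upgrade hook,
 * gated to the PMF.  Magic values in the ethtool request select the
 * phase: 'PHYP' (0x50485950) parks the link for the upgrade, 'PHYR'
 * (0x50485952) re-initializes it afterwards, and 0x53985943 ('PHYC' in
 * the code's own comment) takes the SFX7101 out of download mode and
 * resets it.  Anything else is a plain NVRAM write.
 */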
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

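/*
 * Editorial note: the register self-test walks a table of read/write
 * registers, each entry giving the port-0 offset, the per-port stride
 * and a mask of implemented bits.  Every register is written with
 * 0x00000000 and then 0xffffffff, read back through the mask and
 * restored; any mismatch fails the test.
 */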
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, then by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
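
/* Illustration (not part of the driver): the walking-pattern test above
 * reduced to its essentials - write a pattern, read it back, compare only
 * the implemented bits, and always restore the old value before judging
 * the result.  reg_rd()/reg_wr() are hypothetical stand-ins for
 * REG_RD()/REG_WR(). */
#if 0
static int pattern_test_one(struct bnx2x *bp, u32 offset, u32 mask)
{
	static const u32 patterns[] = { 0x00000000, 0xffffffff };
	unsigned int p;

	for (p = 0; p < ARRAY_SIZE(patterns); p++) {
		u32 save = reg_rd(bp, offset);
		u32 val;

		reg_wr(bp, offset, patterns[p]);
		val = reg_rd(bp, offset);
		reg_wr(bp, offset, save);	/* restore before judging */

		/* only bits covered by the mask are implemented */
		if ((val & mask) != (patterns[p] & mask))
			return -ENODEV;
	}
	return 0;
}
#endif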

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
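
/* Illustration (not driver code): each nvram_tbl[] area presumably ends
 * with the little-endian CRC32 of the bytes before it, so running
 * ether_crc_le() over data-plus-stored-CRC yields the constant residue
 * 0xdebb20e3 for an intact block - one pass both computes and verifies.
 * A standalone model of that check, with a bitwise CRC standing in for
 * the kernel's table-driven ether_crc_le(): */
#if 0
static u32 crc32_le_noinv(const u8 *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, b;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return crc;	/* no final inversion, like ether_crc_le() */
}
/* if block[] ends with ~crc32_le_noinv(payload) stored LSB first, then
 * crc32_le_noinv(block) == 0xdebb20e3 == CRC32_RESIDUAL */
#endif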

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
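
/* Illustration (not driver code): the firmware exports the 8-byte
 * counters above as two consecutive 32-bit words, high word first, so
 * the HILO_U64() used here is essentially: */
#if 0
static inline u64 hilo_u64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) + lo;
}
/* i.e. buf[k + j] = hilo_u64(*offset, *(offset + 1)); */
#endif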

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
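
/* Note (added for illustration): the D-state change above is a plain
 * read-modify-write of the two PowerState bits in the PMCSR register;
 * "pmcsr |= 3" requests D3hot (0 = D0, 1 = D1, 2 = D2, 3 = D3hot), and
 * PME_ENABLE is set on top only when Wake-on-LAN is wanted. */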

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
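
/* Illustration (not driver code): the "== MAX_RCQ_DESC_CNT" bump above
 * exists because the last element of each completion-queue page is
 * reserved (it points at the next page) rather than holding a real
 * completion, so a status-block index landing on it must be pushed past
 * it before being compared with the driver's consumer.  A standalone
 * model, assuming a page of 128 entries (127 usable): */
#if 0
#define SKETCH_DESC_PER_PAGE	128
#define SKETCH_MAX_DESC_CNT	(SKETCH_DESC_PER_PAGE - 1)

static u16 skip_next_page_element(u16 hw_idx)
{
	if ((hw_idx & SKETCH_MAX_DESC_CNT) == SKETCH_MAX_DESC_CNT)
		hw_idx++;	/* step over the next-page element */
	return hw_idx;
}
#endif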

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus
			 * we need to ensure that status block indices have
			 * been actually read (bnx2x_update_fpsb_idx) prior
			 * to this check (bnx2x_has_rx_work) so that we won't
			 * write the "newer" value of the status block to IGU
			 * (if there was a DMA right after bnx2x_has_rx_work
			 * and if there is no rmb, the memory reading
			 * (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will
			 * never be another interrupt until there is another
			 * update of the status block, while there is still
			 * unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
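
/* Illustration (not driver code): the completion dance above, reduced
 * to a skeleton.  The rmb() is what closes the lost-interrupt window
 * described in the comment; all helpers here are hypothetical. */
#if 0
	for (;;) {
		do_pending_work();
		if (!work_visible()) {
			refresh_indices_from_status_block();
			rmb();		/* order the reads vs. the re-check */
			if (!work_visible()) {
				napi_complete(napi);
				reenable_interrupts();
				break;
			}
		}
	}
#endif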


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
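
/* Illustration (not driver code; addr written as a single field for
 * brevity where the real BDs use addr_hi/addr_lo): the split above in
 * plain arithmetic - one DMA mapping feeds two BDs: */
#if 0
	h_tx_bd->nbytes = hlen;			/* headers only */
	d_tx_bd->addr   = mapping + hlen;	/* same mapping, shifted */
	d_tx_bd->nbytes = old_len - hlen;	/* remaining payload */
	/* nbd on the header BD already counts the extra data BD */
#endif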

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
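
/* Note (added for illustration): what bnx2x_csum_fix() is doing,
 * spelled out.  The stack's partial sum may start "fix" bytes before
 * (fix > 0) or after (fix < 0) the transport header, so the sum over
 * the stray span is removed or added back with the usual
 * one's-complement identities:
 *
 *	fix > 0: result = ~fold(csum_sub(csum, sum(t_header - fix, fix)))
 *	fix < 0: result = ~fold(csum_add(csum, sum(t_header, -fix)))
 *
 * and the final swab16() only matches the byte order the parsing BD
 * expects. */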

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
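
/* Worked example (added for illustration): for a CHECKSUM_PARTIAL TCPv4
 * skb with SKB_GSO_TCPV4 set, the function above returns
 *
 *	XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 *
 * the GSO branch re-asserts the csum bits, so a GSO packet is always
 * flagged for checksum offload as well. */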

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
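
/* Illustration (not driver code): the FW constraint behind
 * bnx2x_pkt_req_lin() is that every window of wnd_size consecutive BDs
 * must carry at least one MSS worth of data.  The same check as a
 * self-contained sliding window over an array of fragment sizes: */
#if 0
static int needs_linearize(const unsigned int *frag_sz, int nfrags,
			   int wnd_size, unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size)
			wnd_sum -= frag_sz[i - wnd_size];	/* slide */
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return 1;	/* too fragmented for the FW */
	}
	return 0;
}
#endif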

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
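
/* Worked example (added for illustration): how a multicast MAC lands in
 * the E1H hash above.  crc32c runs over the six address bytes, the top
 * byte of the CRC picks one of 256 hash bits, and that bit number is
 * split into a register index and a bit position (MC_HASH_SIZE
 * registers of 32 bits each):
 *
 *	crc    = crc32c_le(0, ha->addr, ETH_ALEN);
 *	bit    = (crc >> 24) & 0xff;    one of 256 hash bits
 *	regidx = bit >> 5;              which 32-bit register
 *	bit   &= 0x1f;                  which bit inside it
 *
 * False positives simply pass the filter and are dropped higher up. */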

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
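
/*
 * poll_bnx2x() runs in netpoll contexts (e.g. netconsole) where normal
 * interrupt delivery cannot be relied upon, so the interrupt handler is
 * invoked directly with the device IRQ masked to avoid re-entry.
 */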

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			pr_err("dma_set_coherent_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
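
/*
 * Mapping summary for bnx2x_init_dev(): BAR0 is the register window
 * (bp->regview) and BAR2 the doorbell window (bp->doorbells), with the
 * doorbell mapping capped at BNX2X_DB_SIZE.
 */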

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
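
/*
 * Example decode, assuming a link trained at x8 Gen2: the width field
 * yields *width = 8 and the speed field *speed = 2, which
 * bnx2x_init_one() below reports as "PCI-E x8 5GHz (Gen2)".
 */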

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			pr_err("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			pr_err("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
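
/*
 * Worked example (hypothetical bytes): the 8-byte big-endian record
 * 02 00 00 10 00 00 00 5a unpacks above to
 *	op       = 0x02
 *	offset   = 0x000010
 *	raw_data = 0x0000005a
 * i.e. the top byte of the first word is the opcode and its low 24 bits
 * the target offset.
 */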

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
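
/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands to roughly: read init_data.len from the file
 * header, kmalloc() a buffer into bp->init_data, and byte-swap the blob
 * out of bp->firmware->data into it, jumping to request_firmware_exit
 * if the allocation fails.
 */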

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
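
/*
 * Ownership note: bp->init_data, bp->init_ops and bp->init_ops_offsets
 * allocated here (and the firmware image itself) stay live for the
 * lifetime of the device and are released in bnx2x_remove_one() below.
 */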

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
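
/*
 * Flow control between the queues: kwqes sit in the software KWQ ring
 * until bnx2x_cnic_sp_post() can move them onto the hardware slow-path
 * queue, never allowing more than max_kwqe_pending SPQ entries to be
 * outstanding at once.
 */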

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
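
/*
 * Illustrative sketch only (hypothetical values): this is roughly how
 * the cnic side would credit one slow-path completion back through the
 * drv_ctl hook above:
 *
 *	struct drv_ctl_info info = {
 *		.cmd = DRV_CTL_COMPLETION_CMD,
 *		.data.comp.comp_count = 1,
 *	};
 *	cp->drv_ctl(dev, &info);
 */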

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
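
/*
 * IRQ layout note: irq_arr[0] carries CNIC's own status block (vector
 * msix_table[1] when MSI-X is in use) and irq_arr[1] the default status
 * block, hence num_irq = 2.
 */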

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
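
/*
 * Teardown ordering note: clearing bp->cnic_ops with rcu_assign_pointer()
 * and then calling synchronize_rcu() guarantees that no reader in
 * bnx2x_cnic_ctl_send_bh() still holds the old ops pointer before the
 * KWQ memory is freed.
 */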

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */